diff --git a/.github/workflows/format.yaml b/.github/workflows/format.yaml
new file mode 100644
index 0000000..16a781b
--- /dev/null
+++ b/.github/workflows/format.yaml
@@ -0,0 +1,25 @@
+# .github/workflows/format.yaml
+name: Lint Check
+
+on:
+ push:
+ branches:
+ - '**'
+
+jobs:
+ lint:
+ name: Run make lint
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.24.2'
+
+ - name: Run lint
+ run: make lint
diff --git a/.gitignore b/.gitignore
index 4d17374..ab5c08e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
-# Binary files
-/bin/
+# Debug logs
+/logs/
# for macOS dev environments
.DS_Store
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d7eeb90
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,29 @@
+# --- Build stage ---
+FROM golang:latest AS builder
+
+WORKDIR /app
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY . .
+
+RUN make
+
+# --- Final image ---
+FROM debian:latest
+
+WORKDIR /app
+
+# Copy only the built binary
+COPY --from=builder /app/bin/laclm ./bin/laclm
+
+# Install bash and the ACL utilities (getfacl/setfacl) required at runtime
+RUN apt-get update && apt-get install -y bash acl && rm -rf /var/lib/apt/lists/*
+
+EXPOSE 8080
+
+# Default command to run your Go app
+CMD ["./bin/laclm", "--config", "config.yaml"]
diff --git a/Makefile b/Makefile
index a624622..c4cc897 100644
--- a/Makefile
+++ b/Makefile
@@ -1,29 +1,47 @@
-APP_NAME = laclm
-CMD_DIR = ./cmd/$(APP_NAME)
-BIN_DIR = ./bin
-BIN_PATH = $(BIN_DIR)/$(APP_NAME)
+APP_NAME := laclm
+CMD_DIR := ./cmd/$(APP_NAME)
+BIN_DIR := ./bin
+BUILD_DIR := ./build
GOFILES := $(shell find . -name '*.go' -type f)
-.PHONY: all build clean run test lint build-linux build-mac build-win
+# Target platforms: OS_ARCH
+TARGETS := \
+ linux_amd64 \
+ linux_arm64
+.PHONY: all build build-cross clean run test lint vendor package
+
+## Default target
all: build
-## Build the app
-build: $(GOFILES)
+## Build for local OS/arch using vendored deps
+build: vendor $(GOFILES)
@echo "Building $(APP_NAME)..."
@mkdir -p $(BIN_DIR)
- go build -o $(BIN_PATH) $(CMD_DIR)
+ GOOS="" GOARCH="" go build -mod=vendor -o $(BIN_DIR)/$(APP_NAME) $(CMD_DIR)
+
+## Build cross-compiled binaries for all Linux targets
+build-cross: vendor $(GOFILES)
+ @echo "Cross building for targets: $(TARGETS)"
+ @mkdir -p $(BIN_DIR)
+ @for target in $(TARGETS); do \
+ OS=$${target%_*}; \
+ ARCH=$${target#*_}; \
+ OUT=$(BIN_DIR)/$(APP_NAME)-$$OS-$$ARCH; \
+ echo "Building $$OUT..."; \
+ GOOS=$$OS GOARCH=$$ARCH go build -mod=vendor -o $$OUT $(CMD_DIR); \
+ done
## Run the app
run: build
@echo "Running $(APP_NAME)..."
- @$(BIN_PATH)
+ @$(BIN_DIR)/$(APP_NAME)
-## Clean build artifacts
+## Clean build and package directories
clean:
@echo "Cleaning..."
- @rm -rf $(BIN_DIR)
+ @rm -rf $(BIN_DIR) $(BUILD_DIR) vendor
## Run tests
test:
@@ -32,9 +50,32 @@ test:
## Lint (requires golangci-lint)
lint:
+ @if ! command -v golangci-lint >/dev/null 2>&1; then \
+ echo "Installing golangci-lint..."; \
+ go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest; \
+ fi
@echo "Linting..."
@golangci-lint run
-## Cross-build for Linux
-build-linux:
- GOOS=linux GOARCH=amd64 go build -o $(BIN_DIR)/$(APP_NAME)-linux $(CMD_DIR)
+## Vendor dependencies
+vendor:
+ @echo "Vendoring dependencies..."
+ go mod vendor
+
+## Package full project source (with vendor) for each target
+package: clean vendor
+ @echo "Packaging full source tarballs for: $(TARGETS)"
+ @mkdir -p $(BUILD_DIR)
+ @for target in $(TARGETS); do \
+ OS=$${target%_*}; \
+ ARCH=$${target#*_}; \
+ NAME=$(APP_NAME)-$$OS-$$ARCH; \
+ TARBALL=$$NAME-source.tar.gz; \
+ echo "Creating $$TARBALL..."; \
+ mkdir -p tmp/$$NAME; \
+ cp -r * tmp/$$NAME; \
+ rm -rf tmp/$$NAME/$(BUILD_DIR) tmp/$$NAME/$(BIN_DIR); \
+ gtar -czf $(BUILD_DIR)/$$TARBALL -C tmp $$NAME; \
+ rm -rf tmp/$$NAME; \
+ done
+ @rm -rf tmp
diff --git a/README.md b/README.md
index 6701792..a46239a 100644
--- a/README.md
+++ b/README.md
@@ -1,34 +1,152 @@
-# Backend Component - Linux ACL Management Interface
+
-Securing Linux Storage with ACLs: An Open-Source Web Management Interface for Enhanced Data Protection.
+# Linux ACL Management Interface - Backend Component
-Progress Docs: https://pythonhacker24.github.io/linux-acl-management/
+
-## Documentation
+A robust web-based management interface for Linux Access Control Lists (ACLs), designed to enhance data protection and simplify ACL administration. This project provides a modern, user-friendly solution for managing file system permissions in Linux environments.
-To be written ...
+[License: MIT](https://opensource.org/licenses/MIT)
+[View Documentation](https://pythonhacker24.github.io/linux-acl-management/)
-## Progress Report
+
-To be written ...
+## Project Summary
+
+Institutional departments, such as the Biomedical Informatics (BMI) Department of Emory University School of Medicine, manage vast amounts of data, often reaching petabyte scales across multiple Linux-based storage servers. Researchers storing data in these systems need a streamlined way to modify ACLs to grant or revoke access for collaborators. Currently, the IT team at BMI is responsible for manually handling these ACL modifications, which is time-consuming, error-prone, and inefficient, especially as data volume and user demands grow.
+
+To address this challenge at BMI and similar institutions worldwide, a Web Management Interface is needed to allow users to modify ACLs securely. This solution would eliminate the burden on IT teams by enabling on-demand permission management while ensuring security and reliability. The proposed system will feature a robust and highly configurable backend, high-speed databases, orchestration daemons for file storage servers, and an intuitive frontend.
+
+The proposal includes an in-depth analysis of required components, high-level and low-level design considerations, technology selection, and the demonstration of a functional prototype as proof of concept. The goal is to deliver a production-ready, secure, scalable, and reliable system for managing ACLs across multiple servers hosting filesystems such as NFS, BeeGFS, and others. This solution will streamline access control management and prepare it for deployment at BMI and other institutions worldwide, significantly reducing the manual workload for IT teams.
+
+## Features
+
+- Intuitive web interface for ACL management
+- High-performance backend written in Go
+- Real-time ACL updates
+- Comprehensive ACL reporting and visualization
+- Integration with OpenLDAP for authentication
+
+## Quick Start
+
+### Prerequisites
+
+- Go 1.20 or higher
+- Docker (optional)
+- Redis
+- OpenLDAP server
+
+### Local Installation
+
+1. Clone the repository:
+ ```bash
+ git clone https://github.com/PythonHacker24/linux-acl-management.git
+ cd linux-acl-management
+ ```
+
+2. Install dependencies:
+ ```bash
+ go mod download
+ ```
+
+3. Build the application:
+ ```bash
+ go build -o bin/laclm ./cmd/laclm
+ ```
+
+### Production Build
+
+For production builds, it is recommended to use the Makefile, which lets you build the binary locally for security reasons. Since the project is still under development, a fully offline local build is not yet possible because dependencies are fetched from GitHub and external vendors; tarball-based fully local builds will be developed in later stages. A cross-compilation target for Linux is also available (see below).
+
+1. Clone the repository:
+ ```bash
+ git clone https://github.com/PythonHacker24/linux-acl-management.git
+ cd linux-acl-management
+ ```
+
+2. Use make:
+ ```bash
+ make build
+ ```
+
+3. Execute the binary
+ ```bash
+ ./bin/laclm --config config.yaml
+ ```
+
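+To cross-compile Linux binaries (amd64 and arm64) instead of building only for the local platform, the Makefile also provides a `build-cross` target:
+```bash
+make build-cross
+```
+The resulting binaries are placed in `./bin/` as `laclm-<os>-<arch>`.
+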
+### Docker Testbench Deployment
+
+A simulated environment is provided via docker-compose for testing and experimentation. It is not a production-grade deployment, but a training ground for validating your `config.yaml` against specific scenarios.
+
+```bash
+docker-compose up -d
+```
+
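+For experimenting with LDAP authentication, a small testbench (OpenLDAP plus phpLDAPadmin) is provided under `development/`. It can be started separately, for example:
+```bash
+docker-compose -f development/docker-compose.ldap.yaml up -d
+```
+This exposes the LDAP server on ports 389/636 and phpLDAPadmin on port 8090, matching the LDAP defaults in the sample `config.yaml`.
+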
+A complete, optional Docker-based deployment will be developed in later stages of the project.
+
+## Usage
+
+1. Configure your settings in `config.yaml` (values written as `${VAR}` are expanded from environment variables at load time; see the sketch below)
+
+2. Start the server:
+ ```bash
+ ./laclm --config config.yaml
+ ```
+
+3. Access the API at `http://<host>:<port>` as configured in `config.yaml` (for example, the health check shown below)
+
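+The sample `config.yaml` references secrets through environment variables, which the loader expands at startup; they can be exported in the shell or placed in a `.env` file. A minimal sketch of such a file, using the variable names from the sample config (the values here are placeholders, not real defaults):
+```bash
+# .env - example values only
+LACLM_TRANS_REDIS_PASSWORD=changeme
+LACLM_LDAP_ADMIN_DN="cn=admin,dc=myorg,dc=local"
+LACLM_LDAP_ADMIN_PASSWORD=admin
+JWT_SECRET_TOKEN=replace-with-a-long-random-secret
+```
+Once the server is running with the default settings (host `0.0.0.0`, port `8080`), the health endpoint can be checked with:
+```bash
+curl http://localhost:8080/health
+```
+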
+For detailed usage instructions, please refer to our [documentation](https://pythonhacker24.github.io/linux-acl-management/).
+
+## Project Structure
+
+```
+.
+├── cmd/ # Application entry points
+├── internal/ # Private application code
+├── pkg/ # Public library code
+├── api/ # API definitions and handlers
+├── docs/ # Documentation
+└── deployments/ # Deployment configurations
+```
+
+## Development
+
+### Branches
+
+- `main`: Production-ready code
+- `development-v<version>`: Development branches for specific versions
+
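+### Testing and Linting
+
+The CI workflow runs `make lint` on every push. The same checks, plus the test suite, can be run locally via the Makefile:
+```bash
+make test
+make lint
+```
+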
+### Contributing
+
+1. Fork the repository
+2. Create your feature branch (`git checkout -b feature/amazing-feature`)
+3. Commit your changes (`git commit -m 'Add some amazing feature'`)
+4. Push to the branch (`git push origin feature/amazing-feature`)
+5. Open a Pull Request
+
+Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct and development process.
## About
-- **Organization:** Department of Biomedical Informatics, Emory University
-- **Program:** Google Summer of Code 2025
-- **Contributor:** Aditya Patil
-- **Mentors:** Robert Tweedy, Mahmoud Zeydabadinezhad, PhD
+This project is developed as part of Google Summer of Code 2025, in collaboration with the Department of Biomedical Informatics at Emory University.
-This project is part of Google Summer of Code 2025, undertaken with the Department of Biomedical Informatics at Emory University.
+### Team
-## Technologies Used
+- **Contributor:** Aditya Patil
+- **Mentors:**
+ - Robert Tweedy
+ - Mahmoud Zeydabadinezhad, PhD
+
+### Technologies
-- **Programming Languages:** Golang
-- **Frameworks/Libraries:** net/http
-- **Standards/Protocols:** gRPC, REST
-- **Tools:** Tarball, Redis, Docker, OpenLDAP
+- **Backend:** Golang, net/http
+- **API:** gRPC, REST
+- **Infrastructure:** Docker, Redis, OpenLDAP
+- **Packaging:** Tarball
## License
-This project is licensed under the MIT License - see the LICENSE file for details.
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## Acknowledgments
+
+- Department of Biomedical Informatics, Emory University
+- Google Summer of Code Program
+- Open Source Community
diff --git a/api/middleware/middleware.go b/api/middleware/middleware.go
index e69de29..19ddb98 100644
--- a/api/middleware/middleware.go
+++ b/api/middleware/middleware.go
@@ -0,0 +1,155 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/token"
+)
+
+/* logging middleware for http requests */
+func LoggingMiddleware(next http.HandlerFunc) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+
+ /* log the request at the instant it is received */
+ zap.L().Info("Received request",
+ zap.String("Method", r.Method),
+ zap.String("Path", r.URL.Path),
+ )
+
+ /* return the handler */
+ next(w, r)
+
+ /* logging time taken by the request */
+ zap.L().Info("Request completed",
+ zap.String("Method", r.Method),
+ zap.String("Path", r.URL.Path),
+ zap.Duration("Duration", time.Since(start)),
+ )
+ })
+}
+
+/*
+authentication middleware for http requests
+return username and sessionID with context
+*/
+func AuthenticationMiddleware(next http.HandlerFunc) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ /* authenticate the request through JWT */
+ username, sessionID, err := token.ExtractDataFromRequest(r)
+ if err != nil {
+ zap.L().Info("Error during authentication",
+ zap.Error(err),
+ )
+ http.Error(w, "Authentication Failed", http.StatusInternalServerError)
+ return
+ }
+
+ /* set the header with the username */
+ r.Header.Set("X-User", username)
+
+ /* pass username and sessionID as context */
+ ctx := context.WithValue(r.Context(), ContextKeyUsername, username)
+ ctx = context.WithValue(ctx, ContextKeySessionID, sessionID)
+
+ /* return the handler */
+ next(w, r.WithContext(ctx))
+ })
+}
+
+/*
+handles CORS headers
+*/
+func CORSMiddleware(next http.HandlerFunc, allowedOrigins []string, allowedMethods []string, allowedHeaders []string) http.HandlerFunc {
+ /* select all allowed origins */
+ originMap := make(map[string]bool)
+ for _, o := range allowedOrigins {
+ originMap[o] = true
+ }
+
+ /* extract methods and origin */
+ methods := strings.Join(allowedMethods, ", ")
+ headers := strings.Join(allowedHeaders, ", ")
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ /* get the origin header */
+ origin := r.Header.Get("Origin")
+ /* set appropriate CORS header */
+ if origin != "" && (originMap["*"] || originMap[origin]) {
+ w.Header().Set("Access-Control-Allow-Origin", origin)
+ w.Header().Set("Vary", "Origin")
+ w.Header().Set("Access-Control-Allow-Methods", methods)
+
+ /* determine final allowed headers
+ If configured as "*", echo back the requested headers to ensure
+ headers like Authorization are explicitly allowed.
+ */
+ finalAllowedHeaders := headers
+ if strings.Contains(headers, "*") {
+ requestedHeaders := r.Header.Get("Access-Control-Request-Headers")
+ if requestedHeaders != "" {
+ finalAllowedHeaders = requestedHeaders
+ } else {
+ /* sensible defaults when no specific request headers are provided */
+ finalAllowedHeaders = "Authorization, Content-Type"
+ }
+ }
+ w.Header().Set("Access-Control-Allow-Headers", finalAllowedHeaders)
+
+ w.Header().Set("Access-Control-Allow-Credentials", "true")
+ }
+
+ /* handle preflight (OPTIONS) requests */
+ if r.Method == http.MethodOptions {
+ w.WriteHeader(http.StatusNoContent)
+ return
+ }
+
+ /* call the next handler for non-OPTIONS requests */
+ next(w, r)
+ })
+}
+
+/*
+authentication middleware for http requests with query
+return username and sessionID with context
+*/
+func AuthenticationQueryMiddleware(next http.HandlerFunc) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ /* get the HTTP query */
+ query := r.URL.Query()
+
+ /* get the token query */
+ tokenQ := query.Get("token")
+ if tokenQ == "" {
+ zap.L().Info("Query authentication without token value")
+ http.Error(w, "Missing 'token' query parameter value", http.StatusBadRequest)
+ return
+ }
+
+ /* extract username and sessionID from the token */
+ username, sessionID, err := token.GetDataFromJWT(tokenQ)
+ if err != nil {
+ zap.L().Info("Error during authentication",
+ zap.Error(err),
+ )
+ http.Error(w, "Authentication Failed", http.StatusInternalServerError)
+ return
+ }
+
+ /* set the header with the username */
+ r.Header.Set("X-User", username)
+
+ /* pass username and sessionID as context */
+ ctx := context.WithValue(r.Context(), ContextKeyUsername, username)
+ ctx = context.WithValue(ctx, ContextKeySessionID, sessionID)
+
+ /* return the handler */
+ next(w, r.WithContext(ctx))
+ })
+}
diff --git a/api/middleware/model.go b/api/middleware/model.go
new file mode 100644
index 0000000..206ba2d
--- /dev/null
+++ b/api/middleware/model.go
@@ -0,0 +1,10 @@
+package middleware
+
+/* contextKey type for middleware context value passing */
+type contextKey string
+
+/* defining contextKey types */
+const (
+ ContextKeyUsername contextKey = "username"
+ ContextKeySessionID contextKey = "session_id"
+)
diff --git a/api/routes/routes.go b/api/routes/routes.go
index e69de29..867fa30 100644
--- a/api/routes/routes.go
+++ b/api/routes/routes.go
@@ -0,0 +1,325 @@
+package routes
+
+import (
+ "net/http"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/api/middleware"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/auth"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/health"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/search"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/traversal"
+)
+
+/* all routes for all features are registered here */
+func RegisterRoutes(mux *http.ServeMux, sessionManager *session.Manager) {
+
+ /* move it to config file */
+ allowedOrigin := []string{"http://localhost:3000"}
+ allowedMethods := []string{"GET", "POST", "OPTIONS"}
+ allowedHeaders := []string{"*"}
+
+ /* for monitoring the state of overall server and laclm backend */
+ mux.Handle("GET /health", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(health.HealthHandler),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /health */
+ mux.HandleFunc("OPTIONS /health",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* for logging into the backend and creating a session */
+ mux.HandleFunc("POST /auth/login",
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ auth.LoginHandler(sessionManager),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* handle OPTIONS preflight requests for /auth/login */
+ mux.HandleFunc("OPTIONS /auth/login",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* for logging out of the backend and expiring the session */
+ mux.HandleFunc("GET /auth/logout",
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ auth.LogoutHandler(sessionManager),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* handle OPTIONS preflight requests for /auth/logout */
+ mux.HandleFunc("OPTIONS /auth/logout",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* for verifying if a token is valid or not */
+ mux.Handle("GET /auth/token/validate", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ auth.ValidateToken,
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /auth/token/validate */
+ mux.HandleFunc("OPTIONS /auth/token/validate",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* for listing files in a directory */
+ mux.Handle("POST /traverse/list-files", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(traversal.ListFilesInDirectory),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /traverse/list-files */
+ mux.HandleFunc("OPTIONS /traverse/list-files",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* for scheduling a transaction */
+ mux.Handle("POST /transactions/schedule", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(sessionManager.IssueTransaction),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /transactions/schedule */
+ mux.HandleFunc("OPTIONS /transactions/schedule",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /*
+ for fetching list of users matching the query in the LDAP server
+ supports URL params: q (Query)
+ */
+ mux.Handle("GET /users/ldap/search", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(search.SearchUsersHandler),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /users/ldap/search */
+ mux.HandleFunc("OPTIONS /users/ldap/search",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /*
+ websocket connection for streaming user session data from Redis
+ supports URL params: token (JWT authentication)
+ */
+ mux.Handle("/users/session", http.HandlerFunc(
+ middleware.LoggingMiddleware(
+ /* you need authentication via query parameter */
+ middleware.AuthenticationQueryMiddleware(sessionManager.StreamUserSession),
+ ),
+ ))
+
+ /*
+ websocket connection for streaming user transaction results from Redis
+ supports URL params: token (JWT authentication)
+ */
+ mux.Handle("/users/transactions/results", http.HandlerFunc(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationQueryMiddleware(sessionManager.StreamUserTransactionsResults),
+ ),
+ ))
+
+ /*
+ websocket connection for streaming user pending transactions from Redis
+ supports URL params: token (JWT authentication)
+ */
+ mux.Handle("/users/transactions/pending", http.HandlerFunc(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationQueryMiddleware(sessionManager.StreamUserTransactionsPending),
+ ),
+ ))
+
+ /* ARCHIVE WILL BE MADE POST REQUEST -> Header based Authentication */
+
+ /* POST endpoint for streaming user session data from the PostgreSQL database (archived sessions) */
+ mux.Handle("POST /users/archive/session", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(sessionManager.StreamUserArchiveSessions),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /users/archive/session */
+ mux.HandleFunc("OPTIONS /users/archive/session",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* POST endpoint for streaming user transaction results from the PostgreSQL database (archived sessions) */
+ mux.Handle("POST /users/archive/transactions/results", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(sessionManager.StreamUserArchiveResultsTransactions),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /users/archive/transactions/results */
+ mux.HandleFunc("OPTIONS /users/archive/transactions/results",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+
+ /* POST endpoint for streaming user pending transactions from the PostgreSQL database (archived sessions) */
+ mux.Handle("POST /users/archive/transactions/pending", http.HandlerFunc(
+ middleware.CORSMiddleware(
+ middleware.LoggingMiddleware(
+ middleware.AuthenticationMiddleware(sessionManager.StreamUserArchivePendingTransactions),
+ ),
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ ))
+
+ /* handle OPTIONS preflight requests for /users/archive/transactions/pending */
+ mux.HandleFunc("OPTIONS /users/archive/transactions/pending",
+ middleware.CORSMiddleware(
+ func(w http.ResponseWriter, r *http.Request) {
+ /*
+ This handler will never be called because CORSMiddleware handles OPTIONS
+ but we need it for the route to be registered
+ */
+ },
+ allowedOrigin,
+ allowedMethods,
+ allowedHeaders,
+ ),
+ )
+}
diff --git a/archive/stream_admin.go.archive b/archive/stream_admin.go.archive
new file mode 100644
index 0000000..0c69411
--- /dev/null
+++ b/archive/stream_admin.go.archive
@@ -0,0 +1,71 @@
+package session
+
+import (
+ "context"
+ "time"
+
+ "github.com/gorilla/websocket"
+)
+
+/* ==== User Sessions ==== */
+
+/* send list of all sessions */
+func (m *Manager) sendListofAllSessions(conn *websocket.Conn, limit int) error {
+ ctx := context.Background()
+
+ /* cursor for navigating Redis Scan */
+ var cursor uint64
+
+ /* store all sessions keys here */
+ var sessionKeys []string
+
+ /* get all the session keys in the whole Redis database */
+ for {
+ keys, nextCursor, err := m.redis.Scan(ctx, cursor, "session:*", 100).Result()
+ if err != nil {
+ message := StreamMessage{
+ Type: "sessions_list",
+ Data: map[string]any{
+ "exists": false,
+ },
+ Timestamp: time.Now(),
+ }
+ return conn.WriteJSON(message)
+ }
+
+ sessionKeys = append(sessionKeys, keys...)
+ cursor = nextCursor
+ if cursor == 0 {
+ break
+ }
+ }
+
+ /* will contain data about all the sessions */
+ var sessions []SessionStreamData
+
+ /* get data about all the session from Redis */
+ for _, key := range sessionKeys {
+
+ /* get session data from Redis */
+ sessionData, err := m.redis.HGetAll(ctx, key).Result()
+ if err != nil || len(sessionData) == 0 {
+ continue
+ }
+
+ /* deserialize into SessionStreamData */
+ session := convertRedisHashToSession(sessionData)
+
+ sessions = append(sessions, session)
+ }
+
+ /* build the message with all sessions data */
+ message := StreamMessage{
+ Type: "sessions_list",
+ Data: map[string]any{
+ "sessions": sessions,
+ },
+ Timestamp: time.Now(),
+ }
+
+ return conn.WriteJSON(message)
+}
diff --git a/build/laclm-linux-amd64-source.tar.gz b/build/laclm-linux-amd64-source.tar.gz
new file mode 100644
index 0000000..ed21f2a
Binary files /dev/null and b/build/laclm-linux-amd64-source.tar.gz differ
diff --git a/build/laclm-linux-arm64-source.tar.gz b/build/laclm-linux-arm64-source.tar.gz
new file mode 100644
index 0000000..8e4dfbc
Binary files /dev/null and b/build/laclm-linux-arm64-source.tar.gz differ
diff --git a/cmd/laclm/main.go b/cmd/laclm/main.go
index 6ffde47..47895e4 100644
--- a/cmd/laclm/main.go
+++ b/cmd/laclm/main.go
@@ -1,9 +1,336 @@
package main
import (
+ "context"
"fmt"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/MakeNowJust/heredoc"
+ "github.com/jackc/pgx/v5/pgxpool"
+ "github.com/joho/godotenv"
+ "github.com/spf13/cobra"
+ "go.uber.org/automaxprocs/maxprocs"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/api/routes"
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/grpcpool"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/postgresql"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/redis"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/scheduler"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/scheduler/fcfs"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/transprocessor"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/utils"
)
func main() {
- fmt.Println("Hello World")
+ if err := exec(); err != nil {
+ os.Exit(1)
+ }
+}
+
+func exec() error {
+
+ /* exec() wraps run() protecting it with user interrupts */
+
+ err := godotenv.Load()
+ if err != nil {
+ fmt.Print("No .env file found, continuing with system environment variables\n")
+ }
+
+ /* setting up cobra for cli interactions */
+ var (
+ configPath string
+ rootCmd = &cobra.Command{
+ Use: "laclm",
+ Short: "Backend server for linux acl management",
+ Example: heredoc.Doc(`
+ $ laclm --config /path/to/config.yaml
+ `),
+ Run: func(cmd *cobra.Command, args []string) {
+ if configPath != "" {
+ fmt.Printf("Using config file: %s\n\n", configPath)
+ } else {
+ fmt.Printf("No config file provided.\n\n")
+ }
+ },
+ }
+ )
+
+ /* adding --config argument */
+ rootCmd.PersistentFlags().StringVar(&configPath, "config", "", "Path to config file")
+
+ /* Execute the command */
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Printf("arguments error: %s", err.Error())
+ os.Exit(1)
+ }
+
+ /*
+ load config file
+ if there is an error in loading the config file, then it will exit with code 1
+ */
+ if err := config.LoadConfig(configPath); err != nil {
+ fmt.Printf("Configuration Error in %s: %s",
+ configPath,
+ err.Error(),
+ )
+ /* since the configuration is invalid, don't proceed */
+ os.Exit(1)
+ }
+
+ /*
+ true for production, false for development mode
+ logger is only for http server and core components (after this step)
+ using logger for cli issues doesn't make sense
+ */
+ utils.InitLogger(!config.BackendConfig.AppInfo.DebugMode)
+
+ /* zap.L() can be used all over the code for global level logging */
+ zap.L().Info("Logger Initiated ...")
+
+ /* calculate max procs accurately (runtime.GOMAXPROCS(0)) */
+ if _, err := maxprocs.Set(); err != nil {
+ zap.L().Error("automaxprocs: failed to set GOMAXPROCS",
+ zap.Error(err),
+ )
+ }
+
+ /* preparing graceful shutdown for CTRL+C and docker */
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
+
+ go func() {
+ <-interrupt
+ cancel()
+ }()
+
+ return run(ctx)
+}
+
+func run(ctx context.Context) error {
+ var (
+ err error
+ wg sync.WaitGroup
+ )
+
+ /* create a context and waitgroup for the logging goroutine */
+ logCtx, logCancel := context.WithCancel(context.Background())
+ var logWg sync.WaitGroup
+
+ defer func() {
+ logCancel()
+ logWg.Wait()
+ }()
+
+ /* create a error channel */
+ errChLog := make(chan error, 1)
+
+ /* create the client pool for daemons (via gRPC) */
+ /* unsecure for now */
+
+ /* attempting to keep connections alive all the time even with no activity */
+ var kacp = keepalive.ClientParameters{
+ /* send pings every 10 seconds if there is no activity */
+ Time: 10 * time.Second,
+
+ /* wait 2 second for ping ack before considering the connection dead */
+ Timeout: 2 * time.Second,
+
+ /* send pings even without active streams */
+ PermitWithoutStream: true,
+ }
+
+ pool := grpcpool.NewClientPool(
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithKeepaliveParams(kacp),
+ )
+
+ /*
+ initializing scheduler
+ scheduler uses context to quit - part of waitgroup
+ propagates error through error channel
+ */
+ errChShed := make(chan error, 1)
+
+ logRedisClient, err := redis.NewRedisClient(
+ config.BackendConfig.Database.TransactionLogRedis.Address,
+ config.BackendConfig.Database.TransactionLogRedis.Password,
+ config.BackendConfig.Database.TransactionLogRedis.DB,
+ )
+ if err != nil {
+ zap.L().Fatal("Failed to connect to Redis", zap.Error(err))
+ }
+
+ pqDB := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s",
+ config.BackendConfig.Database.ArchivalPQ.User,
+ config.BackendConfig.Database.ArchivalPQ.Password,
+ config.BackendConfig.Database.ArchivalPQ.Host,
+ config.BackendConfig.Database.ArchivalPQ.Port,
+ config.BackendConfig.Database.ArchivalPQ.DBName,
+ config.BackendConfig.Database.ArchivalPQ.SSLMode,
+ )
+
+ poolPQ, err := pgxpool.New(context.Background(), pqDB)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to create connection pool: %v\n", err)
+ os.Exit(1)
+ }
+
+ archivalPQ := postgresql.New(poolPQ)
+
+ /* create a session manager */
+ sessionManager := session.NewManager(logRedisClient, archivalPQ, errChLog)
+
+ /* create a permissions processor */
+ permProcessor := transprocessor.NewPermProcessor(pool, errChLog)
+
+ /* start logging goroutine - should be last to exit */
+ logWg.Add(1)
+ go func(ctx context.Context) {
+ defer logWg.Done()
+ zap.L().Info("log error handler started")
+ for {
+ select {
+ case err, ok := <-errChLog:
+ if !ok {
+ zap.L().Info("log error channel closed")
+ return
+ }
+ if err != nil {
+ zap.L().Error("log error occurred",
+ zap.Error(err),
+ zap.Time("timestamp", time.Now()),
+ )
+ }
+ case <-ctx.Done():
+ zap.L().Info("log error handler shutting down")
+ return
+ }
+ }
+ }(logCtx)
+
+ /* currently FCFS scheduler */
+ transSched := fcfs.NewFCFSScheduler(sessionManager, permProcessor)
+
+ /* initialize the scheduler */
+ scheduler.InitScheduler(ctx, transSched, &wg, errChShed)
+
+ /* setting up http mux and routes */
+ mux := http.NewServeMux()
+
+ /* routes declared in /api/routes.go */
+ routes.RegisterRoutes(mux, sessionManager)
+
+ /* create a http server */
+ server := &http.Server{
+ Addr: fmt.Sprintf("%s:%d",
+ config.BackendConfig.Server.Host,
+ config.BackendConfig.Server.Port,
+ ),
+ Handler: mux,
+ }
+
+ /* starting http server as a goroutine */
+ go func() {
+ zap.L().Info("HTTP REST API server starting",
+ zap.String("Host", config.BackendConfig.Server.Host),
+ zap.Int("Port", config.BackendConfig.Server.Port),
+ )
+ if err = server.ListenAndServe(); err != http.ErrServerClosed {
+ zap.L().Error("ListenAndServe error",
+ zap.Error(err),
+ )
+ }
+ }()
+
+ /*
+ whatever written here will be protected by graceful shutdowns
+ all the functions called must be async here and ready for graceful shutdowns
+ */
+
+ /*
+ scheduler is a core feature of the application
+ when an error occurs in the scheduler, the system needs to be shut down
+ since nothing can work without the scheduler
+ */
+ select {
+ case <-ctx.Done():
+ zap.L().Info("Shutdown process initiated")
+ case err = <-errChShed:
+
+ /* context done can be called here (optional for now) */
+
+ zap.L().Error("Fatal Error from scheduler",
+ zap.Error(err),
+ )
+ return err
+ }
+
+ /*
+ after this, exit signal is triggered
+ the following code must be executed to complete the graceful shutdown
+ call all the kill switches with context
+ */
+
+ /* graceful shutdown of http server - 5 seconds for allowing completion current API requests */
+ shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer shutdownCancel()
+
+ /* initiate http server shutdown */
+ if err = server.Shutdown(shutdownCtx); err != nil {
+ zap.L().Error("HTTP server shutdown error",
+ zap.Error(err),
+ )
+ }
+
+ zap.L().Info("HTTP server stopped")
+
+ /* after the http server is stopped, rest of the components can be shutdown */
+
+ usernames := sessionManager.GetAllUsernames()
+ for _, username := range usernames {
+ if err := sessionManager.ExpireSession(username); err != nil {
+ zap.L().Warn("Failed to expire session during shutdown",
+ zap.String("username", username),
+ )
+ }
+ zap.L().Info("Session force-expired",
+ zap.String("username", username),
+ )
+ }
+
+ wg.Wait()
+
+ /* close connections with daemon */
+ pool.CloseAll(errChLog)
+
+ /* flush Redis data before closing */
+ if err := logRedisClient.FlushAll(context.Background()); err != nil {
+ zap.L().Error("Failed to flush Redis data during shutdown",
+ zap.Error(err),
+ )
+ }
+
+ /* close archival database connection */
+ poolPQ.Close()
+
+ zap.L().Info("All background processes closed gracefully")
+
+ /* close the logging error channel and cancel logging context */
+ close(errChLog)
+
+ return err
}
diff --git a/cmd/ws_test/go.mod b/cmd/ws_test/go.mod
new file mode 100644
index 0000000..3934ca8
--- /dev/null
+++ b/cmd/ws_test/go.mod
@@ -0,0 +1,5 @@
+module ws_test
+
+go 1.24.2
+
+require github.com/gorilla/websocket v1.5.3 // indirect
diff --git a/cmd/ws_test/main.go b/cmd/ws_test/main.go
new file mode 100644
index 0000000..39f391c
--- /dev/null
+++ b/cmd/ws_test/main.go
@@ -0,0 +1,59 @@
+// ws_test: a minimal WebSocket test client
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+
+ "github.com/gorilla/websocket"
+)
+
+func main() {
+ if len(os.Args) < 3 {
+ fmt.Println("Usage: go run main.go <ws-url> <jwt>")
+ return
+ }
+
+ url := os.Args[1]
+ jwt := os.Args[2]
+
+ header := http.Header{}
+ header.Add("Authorization", "Bearer "+jwt)
+
+ urlQ := fmt.Sprintf("%s?token=%s", url, jwt)
+
+ conn, resp, err := websocket.DefaultDialer.Dial(urlQ, header)
+ if err != nil {
+ log.Fatalf("Dial failed: %v, response: %+v", err, resp)
+ }
+ defer conn.Close()
+
+ fmt.Println("Connected to", url)
+ fmt.Println("Type a message and press Enter to send. Ctrl+C to quit.")
+
+ // Read incoming messages in a goroutine
+ go func() {
+ for {
+ _, msg, err := conn.ReadMessage()
+ if err != nil {
+ log.Println("Read error:", err)
+ os.Exit(1)
+ }
+ fmt.Println("Received:", string(msg))
+ }
+ }()
+
+ // Read user input and send messages
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ text := scanner.Text()
+ err := conn.WriteMessage(websocket.TextMessage, []byte(text))
+ if err != nil {
+ log.Println("Write error:", err)
+ break
+ }
+ }
+}
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..2f93888
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,58 @@
+# backend environment configs
+app:
+ name: laclm
+ version: v1.1
+ debug_mode: true
+ session_timeout: 1
+ base_path: /mnt
+ max_workers: 5
+
+# backend server deployment configs
+server:
+ host: 0.0.0.0
+ port: 8080
+
+# databases for operations
+database:
+ transaction_log_redis:
+ address: localhost:6379
+ password: ${LACLM_TRANS_REDIS_PASSWORD}
+ db: 0
+ archival_postgres:
+ host: localhost
+ port: 5432
+ user: postgres
+ password: idontlikepeople
+ dbname: postgres
+ sslmode: disable
+
+# logging configurations
+logging:
+ file: logs/app.log
+ max_size: 100
+ max_backups: 5
+ max_age: 30
+ compress: true
+
+# filesystem server that needs management
+filesystem_servers:
+ - path: /nfs-system
+ method: remote
+ remote:
+ host: localhost
+ port: 6593
+ - path: /beegfs-system
+ method: local
+
+# authentication information
+authentication:
+ ldap:
+ tls: false
+ address: "ldap://localhost:389"
+ admin_dn: ${LACLM_LDAP_ADMIN_DN}
+ admin_password: ${LACLM_LDAP_ADMIN_PASSWORD}
+ search_base: "cn=Princeton Plainsboro Hospital ,dc=myorg,dc=local"
+
+backend_security:
+ jwt_secret_token: ${JWT_SECRET_TOKEN}
+ jwt_expiry: 1
diff --git a/config/app.go b/config/app.go
new file mode 100644
index 0000000..12a9b97
--- /dev/null
+++ b/config/app.go
@@ -0,0 +1,54 @@
+package config
+
+import (
+ "errors"
+
+ "github.com/MakeNowJust/heredoc"
+)
+
+/* app parameters */
+type App struct {
+ Name string `yaml:"name,omitempty"`
+ Version string `yaml:"version,omitempty"`
+ DebugMode bool `yaml:"debug_mode,omitempty"`
+ SessionTimeout int `yaml:"session_timeout,omitempty"`
+ BasePath string `yaml:"base_path,omitempty"`
+ MaxWorkers int `yaml:"max_workers,omitempty"`
+}
+
+/* normalization function */
+func (a *App) Normalize() error {
+
+ /* set default name to laclm */
+ if a.Name == "" {
+ a.Name = "laclm"
+ }
+
+ /* set default version to v1.1 */
+ if a.Version == "" {
+ a.Version = "v1.1"
+ }
+
+ /*
+ if debug_mode is not provided, it's false
+ we want production to be true
+ */
+
+ /* set default session timeout to 24 hours */
+ if a.SessionTimeout == 0 {
+ a.SessionTimeout = 24
+ }
+
+ /* check if base path is specified */
+ if a.BasePath == "" {
+ return errors.New(heredoc.Doc(`
+ Base path is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* max_workers can be zero - it will be adjusted by the scheduler */
+
+ return nil
+}
diff --git a/config/authentication.go b/config/authentication.go
new file mode 100644
index 0000000..667314a
--- /dev/null
+++ b/config/authentication.go
@@ -0,0 +1,69 @@
+package config
+
+import (
+ "errors"
+
+ "github.com/MakeNowJust/heredoc"
+)
+
+/* authentication parameters */
+type Authentication struct {
+ LDAPConfig LDAPConfig `yaml:"ldap,omitempty"`
+}
+
+/* ldap authentication parameters */
+type LDAPConfig struct {
+ TLS bool `yaml:"tls,omitempty"`
+ Address string `yaml:"address,omitempty"`
+ AdminDN string `yaml:"admin_dn,omitempty"`
+ AdminPassword string `yaml:"admin_password,omitempty"`
+ SearchBase string `yaml:"search_base,omitempty"`
+}
+
+/* normalization function */
+func (a *Authentication) Normalize() error {
+ return a.LDAPConfig.Normalize()
+}
+
+/* ldap authentication normalization function */
+func (l *LDAPConfig) Normalize() error {
+ /* TLS will be false by default */
+
+ /* check if address is specified */
+ if l.Address == "" {
+ return errors.New(heredoc.Doc(`
+ LDAP address is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* check if admin DN is specified */
+ if l.AdminDN == "" {
+ return errors.New(heredoc.Doc(`
+ LDAP admin DN is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* check if admin password is specified */
+ if l.AdminPassword == "" {
+ return errors.New(heredoc.Doc(`
+ LDAP admin password is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* check if search base is specified */
+ if l.SearchBase == "" {
+ return errors.New(heredoc.Doc(`
+ LDAP search base is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ return nil
+}
diff --git a/config/backend_security.go b/config/backend_security.go
new file mode 100644
index 0000000..5738a25
--- /dev/null
+++ b/config/backend_security.go
@@ -0,0 +1,35 @@
+package config
+
+import (
+ "errors"
+
+ "github.com/MakeNowJust/heredoc"
+)
+
+/* backend security configs */
+type BackendSecurity struct {
+ JWTTokenSecret string `yaml:"jwt_secret_token,omitempty"`
+
+ /* TODO: make this obsolete */
+ JWTExpiry int `yaml:"jwt_expiry,omitempty"`
+}
+
+/* normalization function */
+func (b *BackendSecurity) Normalize() error {
+
+ /* check if JWT token secret is specified */
+ if b.JWTTokenSecret == "" {
+ return errors.New(heredoc.Doc(`
+ JWT Token Security is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* set default JWT expiry to 24 hours */
+ if b.JWTExpiry == 0 {
+ b.JWTExpiry = 24
+ }
+
+ return nil
+}
diff --git a/config/config.go b/config/config.go
index d912156..67524b2 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1 +1,52 @@
package config
+
+import "fmt"
+
+/* globally accessible yaml config */
+var BackendConfig Config
+
+/* complete yaml config for global usage */
+type Config struct {
+ AppInfo App `yaml:"app,omitempty"`
+ Server Server `yaml:"server,omitempty"`
+ Database Database `yaml:"database,omitempty"`
+ Logging Logging `yaml:"logging,omitempty"`
+ FileSystemServers []FileSystemServers `yaml:"filesystem_servers,omitempty"`
+ BackendSecurity BackendSecurity `yaml:"backend_security,omitempty"`
+ Authentication Authentication `yaml:"authentication,omitempty"`
+}
+
+/* complete config normalizer function */
+func (c *Config) Normalize() error {
+ if err := c.AppInfo.Normalize(); err != nil {
+ return fmt.Errorf("app configuration error: %w", err)
+ }
+
+ if err := c.Server.Normalize(); err != nil {
+ return fmt.Errorf("server configuration error: %w", err)
+ }
+
+ if err := c.Database.Normalize(); err != nil {
+ return fmt.Errorf("database configuration error: %w", err)
+ }
+
+ if err := c.Logging.Normalize(); err != nil {
+ return fmt.Errorf("logging configuration error: %w", err)
+ }
+
+ for i := range c.FileSystemServers {
+ if err := c.FileSystemServers[i].Normalize(); err != nil {
+ return fmt.Errorf("file system server [%d] error: %w", i, err)
+ }
+ }
+
+ if err := c.BackendSecurity.Normalize(); err != nil {
+ return fmt.Errorf("backend security configuration error: %w", err)
+ }
+
+ if err := c.Authentication.Normalize(); err != nil {
+ return fmt.Errorf("authentication configuration error: %w", err)
+ }
+
+ return nil
+}
diff --git a/config/database.go b/config/database.go
new file mode 100644
index 0000000..fce193c
--- /dev/null
+++ b/config/database.go
@@ -0,0 +1,105 @@
+package config
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/MakeNowJust/heredoc"
+)
+
+/* database parameters */
+type Database struct {
+ TransactionLogRedis TransactionLogRedis `yaml:"transaction_log_redis,omitempty"`
+ ArchivalPQ ArchivalPQ `yaml:"archival_postgres,omitempty"`
+}
+
+/* transaction log redis parameters */
+type TransactionLogRedis struct {
+ Address string `yaml:"address,omitempty"`
+ Password string `yaml:"password,omitempty"`
+ DB int `yaml:"db,omitempty"`
+}
+
+/* archival PostgreSQL parameters */
+type ArchivalPQ struct {
+ Host string `yaml:"host,omitempty"`
+ Port int `yaml:"port,omitempty"`
+ User string `yaml:"user,omitempty"`
+ Password string `yaml:"password,omitempty"`
+ DBName string `yaml:"dbname,omitempty"`
+ SSLMode string `yaml:"sslmode,omitempty"`
+}
+
+/* normalization function for database */
+func (d *Database) Normalize() error {
+ /* check if Redis parameters are valid */
+ err := d.TransactionLogRedis.Normalize()
+ if err != nil {
+ return err
+ }
+
+ /* check if PostgreSQL parameters are valid */
+ err = d.ArchivalPQ.Normalize()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+/* transaction log redis normalization function */
+func (r *TransactionLogRedis) Normalize() error {
+ if r.Address == "" {
+ return errors.New(heredoc.Doc(`
+ Transaction Log Redis Address is not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* password can be empty */
+ if r.Password == "" {
+ /* just warn users to use password protected redis */
+ fmt.Printf("Prefer using a password-protected Redis instance for security purposes\n\n")
+ }
+
+ /* r.DB default value can be 0 */
+
+ return nil
+}
+
+/* archival PostgreSQL normalization function */
+func (a *ArchivalPQ) Normalize() error {
+
+ /* default to localhost if empty */
+ if a.Host == "" {
+ a.Host = "localhost"
+ }
+
+ /* default to port 5432 if empty */
+ if a.Port == 0 {
+ a.Port = 5432
+ }
+
+ /* username is mandatory */
+ if a.User == "" {
+ return errors.New("Database username is not set in the configuration.")
+ }
+
+ /* dbname is mandatory */
+ if a.DBName == "" {
+ return errors.New("Database name (dbname) is not set in the configuration.")
+ }
+
+ /* sslmode is disabled by default */
+ if a.SSLMode == "" {
+ a.SSLMode = "disable"
+ }
+
+ /* empty password but give a warning */
+ if a.Password == "" {
+ fmt.Printf("Warning: Connecting to PostgreSQL without a password. Consider using one for security.\n\n")
+ }
+
+ return nil
+}
diff --git a/config/filesystem.go b/config/filesystem.go
new file mode 100644
index 0000000..7c767b3
--- /dev/null
+++ b/config/filesystem.go
@@ -0,0 +1,68 @@
+package config
+
+import (
+ "errors"
+
+ "github.com/MakeNowJust/heredoc"
+)
+
+/* file system server parameters */
+type FileSystemServers struct {
+ Path string `yaml:"path,omitempty"`
+ Method string `yaml:"method,omitempty"`
+ Remote *Remote `yaml:"remote,omitempty"`
+}
+
+/* remote parameters for file system server with laclm daemons installed */
+type Remote struct {
+ Host string `yaml:"host,omitempty"`
+ Port int `yaml:"port,omitempty"`
+}
+
+/* normalization function */
+func (f *FileSystemServers) Normalize() error {
+ if f.Path == "" {
+ return errors.New(heredoc.Doc(`
+ Remote server file path not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* set default method to local */
+ if f.Method == "" {
+ f.Method = "local"
+ }
+
+ /* check if method is remote */
+ if f.Method == "remote" {
+ /* check if remote is specified */
+ if f.Remote == nil {
+ return errors.New(heredoc.Doc(`
+ Remote file server not specified in the configuration file.
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* check if host is specified */
+ if f.Remote.Host == "" {
+ return errors.New(heredoc.Doc(`
+ Address not provided for remote file server
+
+ Please check the docs for more information:
+ `))
+ }
+
+ /* check if port is specified */
+ if f.Remote.Port == 0 {
+ return errors.New(heredoc.Doc(`
+ Port not provided for remote file server
+
+ Please check the docs for more information:
+ `))
+ }
+ }
+
+ return nil
+}
diff --git a/config/loader.go b/config/loader.go
new file mode 100644
index 0000000..398eccc
--- /dev/null
+++ b/config/loader.go
@@ -0,0 +1,49 @@
+package config
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/davecgh/go-spew/spew"
+ "gopkg.in/yaml.v3"
+)
+
+/*
+ we need config normalization as well
+ config normalization fixes all the fields that are not present in config file
+ and sets it to default value
+*/
+
+/* loads yaml config file from given file path */
+func LoadConfig(path string) error {
+
+ /* read the yaml config file */
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("config loading error %w",
+ err,
+ )
+
+ }
+
+ /* expand all environment variables in the yaml config */
+ expanded := os.ExpandEnv(string(data))
+
+ /* unmarshal the yaml file to defined struct */
+ err = yaml.Unmarshal([]byte(expanded), &BackendConfig)
+ if err != nil {
+ return fmt.Errorf("config loading error %w",
+ err,
+ )
+ }
+
+ /* write the config file in console if in debug mode */
+ if BackendConfig.AppInfo.DebugMode {
+ fmt.Println("Contents of Config File (debug mode ON)")
+ spew.Dump(BackendConfig)
+ fmt.Println()
+ }
+
+ /* normalize the complete backend config before proceeding */
+ return BackendConfig.Normalize()
+}
diff --git a/config/logging.go b/config/logging.go
new file mode 100644
index 0000000..8956c76
--- /dev/null
+++ b/config/logging.go
@@ -0,0 +1,33 @@
+package config
+
+/* logging parameters */
+type Logging struct {
+ File string `yaml:"file,omitempty"`
+ MaxSize int `yaml:"max_size,omitempty"`
+ MaxBackups int `yaml:"max_backups,omitempty"`
+ MaxAge int `yaml:"max_age,omitempty"`
+ Compress bool `yaml:"compress,omitempty"`
+}
+
+/* normalization function */
+func (l *Logging) Normalize() error {
+
+ /* set default file to log/app.log */
+ if l.File == "" {
+ l.File = "log/app.log"
+ }
+
+ /* set default max size to 100MB */
+ if l.MaxSize == 0 {
+ l.MaxSize = 100
+ }
+
+ /* set default max backups to 3 */
+ if l.MaxBackups == 0 {
+ l.MaxBackups = 3
+ }
+
+ /* let compression remain false by default */
+
+ return nil
+}
diff --git a/config/server.go b/config/server.go
new file mode 100644
index 0000000..f8be6a1
--- /dev/null
+++ b/config/server.go
@@ -0,0 +1,23 @@
+package config
+
+/* server deployment parameters */
+type Server struct {
+ Host string `yaml:"host,omitempty"`
+ Port int `yaml:"port,omitempty"`
+}
+
+/* normalization function */
+func (s *Server) Normalize() error {
+
+ /* set default host to localhost */
+ if s.Host == "" {
+ s.Host = "localhost"
+ }
+
+ /* set default port to 8080 */
+ if s.Port == 0 {
+ s.Port = 8080
+ }
+
+ return nil
+}
diff --git a/init/init.go b/db/migrations/000001_initial_schema.down.sql
similarity index 100%
rename from init/init.go
rename to db/migrations/000001_initial_schema.down.sql
diff --git a/internal/authentication/authentication.go b/db/migrations/000001_initial_schema.up.sql
similarity index 100%
rename from internal/authentication/authentication.go
rename to db/migrations/000001_initial_schema.up.sql
diff --git a/db/queries/pending_transactions.sql b/db/queries/pending_transactions.sql
new file mode 100644
index 0000000..cfe6eb8
--- /dev/null
+++ b/db/queries/pending_transactions.sql
@@ -0,0 +1,82 @@
+-- name: CreatePendingTransactionPQ :one
+INSERT INTO pending_transactions_archive (
+ id,
+ session_id,
+ timestamp,
+ operation,
+ target_path,
+ entries,
+ status,
+ error_msg,
+ output,
+ executed_by,
+ duration_ms,
+ ExecStatus
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
+) RETURNING *;
+
+-- name: GetPendingTransactionPQ :one
+SELECT * FROM pending_transactions_archive
+WHERE id = $1;
+
+-- name: GetPendingTransactionsBySessionPQ :many
+SELECT * FROM pending_transactions_archive
+WHERE session_id = $1
+ORDER BY timestamp DESC;
+
+-- name: GetPendingTransactionsPQ :many
+SELECT * FROM pending_transactions_archive
+WHERE session_id = $1 AND status = 'pending'
+ORDER BY timestamp DESC;
+
+-- name: GetPendingTransactionsByOperationPQ :many
+SELECT * FROM pending_transactions_archive
+WHERE session_id = $1 AND operation = $2
+ORDER BY timestamp DESC;
+
+-- name: GetPendingTransactionsByPathPQ :many
+SELECT * FROM pending_transactions_archive
+WHERE session_id = $1 AND target_path = $2
+ORDER BY timestamp DESC;
+
+-- name: UpdatePendingTransactionStatusPQ :one
+UPDATE pending_transactions_archive
+SET
+ status = $2,
+ error_msg = $3,
+ output = $4,
+ duration_ms = $5,
+ ExecStatus = $6
+WHERE id = $1
+RETURNING *;
+
+-- name: DeletePendingTransactionPQ :exec
+DELETE FROM pending_transactions_archive
+WHERE id = $1;
+
+-- name: DeletePendingTransactionsBySessionPQ :exec
+DELETE FROM pending_transactions_archive
+WHERE session_id = $1;
+
+-- name: CountPendingTransactionsByStatusPQ :one
+SELECT COUNT(*) FROM pending_transactions_archive
+WHERE session_id = $1 AND status = $2;
+
+-- name: CountPendingTransactionsByOperationPQ :one
+SELECT COUNT(*) FROM pending_transactions_archive
+WHERE session_id = $1 AND operation = $2;
+
+-- name: GetPendingTransactionStatsPQ :one
+SELECT
+ COUNT(*) as total_transactions,
+ COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_transactions,
+ AVG(duration_ms) as avg_duration_ms
+FROM pending_transactions_archive
+WHERE session_id = $1;
+
+-- name: GetPendingTransactionsByUserPaginatedPQ :many
+SELECT * FROM pending_transactions_archive
+WHERE executed_by = $1
+ORDER BY timestamp DESC
+LIMIT $2 OFFSET $3;
diff --git a/db/queries/results_transactions.sql b/db/queries/results_transactions.sql
new file mode 100644
index 0000000..81a143b
--- /dev/null
+++ b/db/queries/results_transactions.sql
@@ -0,0 +1,88 @@
+-- name: CreateResultsTransactionPQ :one
+INSERT INTO results_transactions_archive (
+ id,
+ session_id,
+ timestamp,
+ operation,
+ target_path,
+ entries,
+ status,
+ error_msg,
+ output,
+ executed_by,
+ duration_ms,
+ ExecStatus
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
+) RETURNING *;
+
+-- name: GetResultsTransactionPQ :one
+SELECT * FROM results_transactions_archive
+WHERE id = $1;
+
+-- name: GetResultsTransactionsBySessionPQ :many
+SELECT * FROM results_transactions_archive
+WHERE session_id = $1
+ORDER BY timestamp DESC;
+
+-- name: GetSuccessfulResultsTransactionsPQ :many
+SELECT * FROM results_transactions_archive
+WHERE session_id = $1 AND status = 'success'
+ORDER BY timestamp DESC;
+
+-- name: GetFailedResultsTransactionsPQ :many
+SELECT * FROM results_transactions_archive
+WHERE session_id = $1 AND status = 'failed'
+ORDER BY timestamp DESC;
+
+-- name: GetResultsTransactionsByOperationPQ :many
+SELECT * FROM results_transactions_archive
+WHERE session_id = $1 AND operation = $2
+ORDER BY timestamp DESC;
+
+-- name: GetResultsTransactionsByPathPQ :many
+SELECT * FROM results_transactions_archive
+WHERE session_id = $1 AND target_path = $2
+ORDER BY timestamp DESC;
+
+-- name: UpdateResultsTransactionStatusPQ :one
+UPDATE results_transactions_archive
+SET
+ status = $2,
+ error_msg = $3,
+ output = $4,
+ duration_ms = $5,
+ ExecStatus = $6
+WHERE id = $1
+RETURNING *;
+
+-- name: DeleteResultsTransactionPQ :exec
+DELETE FROM results_transactions_archive
+WHERE id = $1;
+
+-- name: DeleteResultsTransactionsBySessionPQ :exec
+DELETE FROM results_transactions_archive
+WHERE session_id = $1;
+
+-- name: CountResultsTransactionsByStatusPQ :one
+SELECT COUNT(*) FROM results_transactions_archive
+WHERE session_id = $1 AND status = $2;
+
+-- name: CountResultsTransactionsByOperationPQ :one
+SELECT COUNT(*) FROM results_transactions_archive
+WHERE session_id = $1 AND operation = $2;
+
+-- name: GetResultsTransactionStatsPQ :one
+SELECT
+ COUNT(*) as total_transactions,
+ COUNT(CASE WHEN status = 'success' THEN 1 END) as successful_transactions,
+ COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_transactions,
+ AVG(duration_ms) as avg_duration_ms
+FROM results_transactions_archive
+WHERE session_id = $1;
+
+-- name: GetResultsTransactionsByUserPaginatedPQ :many
+SELECT * FROM results_transactions_archive
+WHERE executed_by = $1
+ORDER BY timestamp DESC
+LIMIT $2 OFFSET $3;
diff --git a/db/queries/sessions.sql b/db/queries/sessions.sql
new file mode 100644
index 0000000..a09077c
--- /dev/null
+++ b/db/queries/sessions.sql
@@ -0,0 +1,21 @@
+-- name: StoreSessionPQ :one
+INSERT INTO sessions_archive (
+ id, username, ip, user_agent, status,
+ created_at, last_active_at, expiry,
+ completed_count, failed_count
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10
+) RETURNING *;
+
+-- name: GetSessionPQ :one
+SELECT * FROM sessions_archive
+WHERE id = $1;
+
+-- name: GetSessionByUsernamePaginatedPQ :many
+SELECT * FROM sessions_archive
+WHERE username = $1
+ORDER BY created_at DESC
+LIMIT $2 OFFSET $3;
+
+-- name: DeleteSessionPQ :exec
+DELETE FROM sessions_archive WHERE id = $1;
diff --git a/db/schema.sql b/db/schema.sql
new file mode 100644
index 0000000..5120cf6
--- /dev/null
+++ b/db/schema.sql
@@ -0,0 +1,49 @@
+-- database schema used by sqlc to generate code for the archival PostgreSQL store
+
+CREATE TABLE IF NOT EXISTS sessions_archive (
+ id UUID PRIMARY KEY,
+ username TEXT NOT NULL,
+ ip TEXT,
+ user_agent TEXT,
+ status TEXT CHECK (status IN ('active', 'expired', 'pending')) NOT NULL,
+ created_at TIMESTAMP NOT NULL,
+ last_active_at TIMESTAMP NOT NULL,
+ expiry TIMESTAMP NOT NULL,
+ completed_count INTEGER DEFAULT 0,
+ failed_count INTEGER DEFAULT 0,
+ archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE IF NOT EXISTS pending_transactions_archive (
+ id UUID PRIMARY KEY,
+ session_id UUID NOT NULL,
+ timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+ operation VARCHAR(20) NOT NULL CHECK (operation IN ('getfacl', 'setfacl')),
+ target_path TEXT NOT NULL,
+ entries JSONB NOT NULL DEFAULT '[]'::jsonb,
+ status TEXT CHECK (status IN ('pending')) NOT NULL,
+ error_msg TEXT,
+ output TEXT,
+ executed_by VARCHAR(255) NOT NULL,
+ duration_ms BIGINT,
+ ExecStatus BOOLEAN NOT NULL DEFAULT FALSE,
+ created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
+);
+
+CREATE TABLE IF NOT EXISTS results_transactions_archive (
+ id UUID PRIMARY KEY,
+ session_id UUID NOT NULL,
+ timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+ operation VARCHAR(20) NOT NULL CHECK (operation IN ('getfacl', 'setfacl')),
+ target_path TEXT NOT NULL,
+ entries JSONB NOT NULL DEFAULT '[]'::jsonb,
+ status TEXT CHECK (status IN ('success', 'failed')) NOT NULL,
+ error_msg TEXT,
+ output TEXT,
+ executed_by VARCHAR(255) NOT NULL,
+ duration_ms BIGINT,
+ ExecStatus BOOLEAN NOT NULL DEFAULT FALSE,
+ created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
+);
+
+/* add indexing for optimization */
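+
+/*
+   possible indexes (sketch only, not applied here) matching the lookups in db/queries:
+
+   CREATE INDEX idx_pending_tx_session  ON pending_transactions_archive (session_id, timestamp DESC);
+   CREATE INDEX idx_pending_tx_user     ON pending_transactions_archive (executed_by, timestamp DESC);
+   CREATE INDEX idx_results_tx_session  ON results_transactions_archive (session_id, timestamp DESC);
+   CREATE INDEX idx_results_tx_user     ON results_transactions_archive (executed_by, timestamp DESC);
+   CREATE INDEX idx_sessions_username   ON sessions_archive (username, created_at DESC);
+*/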
diff --git a/development/docker-compose.ldap.yaml b/development/docker-compose.ldap.yaml
new file mode 100644
index 0000000..6bdf05b
--- /dev/null
+++ b/development/docker-compose.ldap.yaml
@@ -0,0 +1,31 @@
+services:
+ openldap:
+ image: osixia/openldap:latest
+ container_name: institutional-openldap
+ environment:
+ LDAP_ORGANISATION: "My Org"
+ LDAP_DOMAIN: "myorg.local"
+ LDAP_ADMIN_PASSWORD: "admin"
+ ports:
+ - "389:389"
+ - "636:636"
+ networks:
+ - backend
+
+ phpldapadmin:
+ image: osixia/phpldapadmin:0.9.0
+ container_name: phpldapadmin
+ environment:
+ PHPLDAPADMIN_LDAP_HOSTS: openldap
+ PHPLDAPADMIN_HTTPS: "false"
+ ports:
+ - "8090:80"
+ depends_on:
+ - openldap
+ networks:
+ - backend
+
+networks:
+ backend:
+ driver: bridge
+
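+# Example usage (assumption): docker compose -f development/docker-compose.ldap.yaml up -d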
diff --git a/development/docker-prune.sh b/development/docker-prune.sh
new file mode 100755
index 0000000..6902c87
--- /dev/null
+++ b/development/docker-prune.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# WARNING: removes ALL containers, images, and volumes on this host, then prunes unused networks.
+sudo docker rm -f $(sudo docker ps -aq) && \
+sudo docker rmi -f $(sudo docker images -aq) && \
+sudo docker volume rm $(sudo docker volume ls -q) && \
+sudo docker network prune -f
diff --git a/development/stress.sh b/development/stress.sh
new file mode 100755
index 0000000..92bfe57
--- /dev/null
+++ b/development/stress.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Step 1: Login to get the token
+login_response=$(curl -s -X POST http://localhost:8080/login \
+ -H "Content-Type: application/json" \
+ -d '{"username": "ghouse", "password": "idontlikepeople"}')
+
+echo "$login_response"
+
+sleep 1
+
+# Step 2: Extract token using jq (requires jq to be installed)
+token=$(echo "$login_response" | jq -r '.token')
+
+# Optional: check if token is empty
+if [ -z "$token" ] || [ "$token" == "null" ]; then
+ echo "Failed to get JWT token"
+ echo "Response: $login_response"
+ exit 1
+fi
+
+echo $token
+
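+# Step 3: Hammer the scheduling endpoint in a tight loop (stop with Ctrl+C)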
+while true; do
+ curl -X POST http://localhost:8080/transactions/schedule \
+ -H "Authorization: Bearer $token" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify"
+ }
+ }'
+done
diff --git a/development/traversal.sh b/development/traversal.sh
new file mode 100644
index 0000000..ec4020a
--- /dev/null
+++ b/development/traversal.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Assumes $token already holds a valid JWT (for example, exported after the login step in development/stress.sh)
+curl -X POST http://localhost:8080/traverse/list-files \
+ -H "Authorization: Bearer $token" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "file_path": "test.txt"
+}'
diff --git a/docker-compose.beegfs.yaml b/docker-compose.beegfs.yaml
new file mode 100644
index 0000000..f3f79d9
--- /dev/null
+++ b/docker-compose.beegfs.yaml
@@ -0,0 +1,69 @@
+version: '3.8'
+
+services:
+ beegfs-mgmtd:
+ image: ghcr.io/thinkparq/beegfs-mgmtd:latest
+ container_name: beegfs-mgmtd
+ ports:
+ - "8008:8008"
+ networks:
+ - beegfs-net
+
+ beegfs-meta:
+ image: ghcr.io/thinkparq/beegfs-meta:latest
+ container_name: beegfs-meta
+ depends_on:
+ - beegfs-mgmtd
+ environment:
+ - BEEGFS_MGMTD_HOSTNAME=beegfs-mgmtd
+ networks:
+ - beegfs-net
+
+ beegfs-storage:
+ image: ghcr.io/thinkparq/beegfs-storage:latest
+ container_name: beegfs-storage
+ depends_on:
+ - beegfs-mgmtd
+ environment:
+ - BEEGFS_MGMTD_HOSTNAME=beegfs-mgmtd
+ volumes:
+ - beegfs-storage-data:/data
+ networks:
+ - beegfs-net
+
+ beegfs-client:
+ image: ghcr.io/thinkparq/beegfs-client:latest
+ container_name: beegfs-client
+ privileged: true
+ depends_on:
+ - beegfs-mgmtd
+ - beegfs-meta
+ - beegfs-storage
+ environment:
+ - BEEGFS_MGMTD_HOSTNAME=beegfs-mgmtd
+ volumes:
+ - beegfs-client-mount:/mnt/beegfs
+ networks:
+ - beegfs-net
+
+ nfs-export:
+ image: itsthenetwork/nfs-server-alpine:latest
+ container_name: nfs-export
+ depends_on:
+ - beegfs-client
+ environment:
+ - SHARED_DIRECTORY=/exports
+ volumes:
+ - beegfs-client-mount:/exports
+ ports:
+ - "2049:2049"
+ networks:
+ - beegfs-net
+
+volumes:
+ beegfs-storage-data:
+ beegfs-client-mount:
+
+networks:
+ beegfs-net:
+ driver: bridge
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..d152669
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,50 @@
+version: "3.8"
+
+services:
+ go-app:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ container_name: laclm
+ ports:
+ - "8080:8080"
+ volumes:
+ - ./config.yaml:/app/config.yaml
+ - ./.env:/app/.env
+ - ./logs/app.log:/app/logs/app.log
+ - /tmp/testnfs:/mnt/nfs-system
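+      # host path /tmp/testnfs stands in for the real NFS-exported filesystem during local development (assumption)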
+ depends_on:
+ - openldap
+# - redis
+ networks:
+ - backend
+
+ openldap:
+ image: osixia/openldap:latest
+ container_name: institutional-openldap
+ environment:
+ LDAP_ORGANISATION: "My Org"
+ LDAP_DOMAIN: "myorg.local"
+ LDAP_ADMIN_PASSWORD: "admin"
+ ports:
+ - "389:389"
+ - "636:636"
+ networks:
+ - backend
+
+ phpldapadmin:
+ image: osixia/phpldapadmin:0.9.0
+ container_name: phpldapadmin
+ environment:
+ PHPLDAPADMIN_LDAP_HOSTS: openldap
+ PHPLDAPADMIN_HTTPS: "false"
+ ports:
+ - "8090:80"
+ depends_on:
+ - openldap
+ networks:
+ - backend
+
+networks:
+ backend:
+ driver: bridge
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
new file mode 100755
index 0000000..817d1c1
--- /dev/null
+++ b/docker-entrypoint.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Load env vars
+if [ -f .env ]; then
+ export $(grep -v '^#' .env | xargs)
+fi
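+# Note: the export pattern above assumes .env values contain no spaces or quotes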
+
+# Run the binary
+exec ./bin/laclm --config config.yaml
diff --git a/go.mod b/go.mod
index 8e451c6..8378583 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,43 @@
module github.com/PythonHacker24/linux-acl-management-backend
go 1.24.2
+
+require (
+ github.com/MakeNowJust/heredoc v1.0.0
+ github.com/davecgh/go-spew v1.1.1
+ github.com/go-ldap/ldap/v3 v3.4.11
+ github.com/golang-jwt/jwt/v5 v5.2.2
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/websocket v1.5.3
+ github.com/jackc/pgx/v5 v5.7.5
+ github.com/joho/godotenv v1.5.1
+ github.com/redis/go-redis/v9 v9.9.0
+ github.com/spf13/cobra v1.9.1
+ go.uber.org/automaxprocs v1.6.0
+ go.uber.org/zap v1.27.0
+ google.golang.org/grpc v1.73.0
+ google.golang.org/protobuf v1.36.6
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/jackc/pgpassfile v1.0.0 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
+ github.com/jackc/puddle/v2 v2.2.2 // indirect
+ github.com/rogpeppe/go-internal v1.14.1 // indirect
+ github.com/spf13/pflag v1.0.6 // indirect
+ github.com/stretchr/testify v1.10.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/crypto v0.37.0 // indirect
+ golang.org/x/net v0.39.0 // indirect
+ golang.org/x/sync v0.13.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..56cb155
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,128 @@
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
+github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
+github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
+github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/redis/go-redis/v9 v9.9.0 h1:URbPQ4xVQSQhZ27WMQVmZSo3uT3pL+4IdHVcYq2nVfM=
+github.com/redis/go-redis/v9 v9.9.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
+go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
+go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
+go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
+go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
+google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/internal/auth/auth.go b/internal/auth/auth.go
new file mode 100644
index 0000000..a61483f
--- /dev/null
+++ b/internal/auth/auth.go
@@ -0,0 +1,12 @@
+package auth
+
+import (
+ "net/http"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/token"
+)
+
+/* extract user data from the http request token */
+func ExtractDataFromRequest(r *http.Request) (string, string, error) {
+ return token.ExtractDataFromRequest(r)
+}
diff --git a/internal/auth/handler.go b/internal/auth/handler.go
new file mode 100644
index 0000000..571dde7
--- /dev/null
+++ b/internal/auth/handler.go
@@ -0,0 +1,126 @@
+package auth
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/token"
+)
+
+/* handles user login and creates a session */
+func LoginHandler(sessionManager *session.Manager) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+
+ /* POST Request only - specified in routes */
+
+ /* decode the request body */
+ var user User
+ err := json.NewDecoder(r.Body).Decode(&user)
+ if err != nil {
+ http.Error(w, "Invalid request body", http.StatusBadRequest)
+ return
+ }
+
+ /* check if username and password are specified */
+ if user.Username == "" || user.Password == "" {
+ http.Error(w, "Username and password are required", http.StatusBadRequest)
+ return
+ }
+
+ /* authenticate the user */
+ authStatus := AuthenticateUser(user.Username,
+ user.Password,
+ config.BackendConfig.Authentication.LDAPConfig.SearchBase,
+ )
+
+ /* check if authentication is successful */
+ if !authStatus {
+ zap.L().Warn("User with invalid credentials attempted to log in")
+ http.Error(w, "Invalid credentials", http.StatusUnauthorized)
+ return
+ }
+
+ /*
+ check if the session already exists in the manager.
+			if it exists, refresh its timer and return a jwt token
+ */
+
+ /* create session for the user */
+ sessionID, err := sessionManager.CreateSession(user.Username, r.RemoteAddr, r.UserAgent())
+ if err != nil {
+ zap.L().Error("Error creating session",
+ zap.Error(err),
+ )
+ http.Error(w, "Error creating session", http.StatusInternalServerError)
+ return
+ }
+
+ /* generate JWT for user interaction */
+ token, err := token.GenerateJWT(user.Username, sessionID)
+ if err != nil {
+ zap.L().Error("Error generating token",
+ zap.Error(err),
+ )
+ http.Error(w, "Error generating token", http.StatusInternalServerError)
+ return
+ }
+
+ /* create auth successful response */
+ response := map[string]string{"token": token}
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(response); err != nil {
+ zap.L().Error("Failed to encode response for login request",
+ zap.Error(err),
+ )
+ http.Error(w, "Failed to encode response for login request", http.StatusInternalServerError)
+ return
+ }
+ }
+}
+
+/* handles user logout and expires the session */
+func LogoutHandler(sessionManager *session.Manager) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+
+ /* authenticate the request through JWT */
+ username, _, err := token.ExtractDataFromRequest(r)
+ if err != nil {
+ zap.L().Info("Error during token extraction in logout",
+ zap.Error(err),
+ )
+ http.Error(w, "Error during token extraction in logout", http.StatusInternalServerError)
+ return
+ }
+
+ err = sessionManager.ExpireSession(username)
+ if err != nil {
+ zap.L().Error("Failed to expire session during logout",
+ zap.Error(err),
+ )
+ http.Error(w, "Failed to expire session during logout", http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ }
+}
+
+/* validate a token */
+func ValidateToken(w http.ResponseWriter, r *http.Request) {
+
+ /* authenticate the request through JWT */
+ _, _, err := token.ExtractDataFromRequest(r)
+ if err != nil {
+ zap.L().Info("Error during authentication",
+ zap.Error(err),
+ )
+		http.Error(w, "Authentication Failed", http.StatusUnauthorized)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go
new file mode 100644
index 0000000..9c50bf7
--- /dev/null
+++ b/internal/auth/ldap.go
@@ -0,0 +1,108 @@
+package auth
+
+import (
+ "crypto/tls"
+ "fmt"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/go-ldap/ldap/v3"
+ "go.uber.org/zap"
+)
+
+/* authenticate a user with ldap */
+func AuthenticateUser(username, password, searchbase string) bool {
+
+ /*
+		authentication is a critical functionality,
+		so its implementation needs to stay simple:
+		we only return true or false,
+		and true is returned only after every elimination step passes,
+		which reduces the risk of unauthorized access in edge cases
+ */
+
+ var l *ldap.Conn
+ var err error
+ ldapAddress := config.BackendConfig.Authentication.LDAPConfig.Address
+
+ /* check if TLS is enabled */
+ if config.BackendConfig.Authentication.LDAPConfig.TLS {
+ l, err = ldap.DialURL(ldapAddress, ldap.DialWithTLSConfig(&tls.Config{
+
+ /* true if using self-signed certs (not recommended) */
+ InsecureSkipVerify: true,
+ }))
+ } else {
+ l, err = ldap.DialURL(ldapAddress)
+ }
+
+ if err != nil {
+ zap.L().Error("Failed to connect to LDAP Server",
+ zap.Error(err),
+ )
+ return false
+ }
+ defer l.Close()
+
+	/* bind to the ldap server as the admin account */
+ err = l.Bind(config.BackendConfig.Authentication.LDAPConfig.AdminDN,
+ config.BackendConfig.Authentication.LDAPConfig.AdminPassword,
+ )
+ if err != nil {
+ zap.L().Error("Admin authentication failed",
+ zap.Error(err),
+ )
+ return false
+ }
+
+ /* creating a search request for ldap server */
+ searchRequest := ldap.NewSearchRequest(
+ searchbase,
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
+
+ /* Searching by username */
+ /* for uid -> fmt.Sprintf("(uid=%s)", username), */
+ // fmt.Sprintf("(cn=%s)", username),
+ fmt.Sprintf("(uid=%s)", username),
+
+ /* We only need the DN */
+ []string{"dn"},
+ nil,
+ )
+
+	/* search the ldap server for the user's entry */
+ searchResult, err := l.Search(searchRequest)
+ if err != nil {
+ zap.L().Error("LDAP search failed",
+ zap.Error(err),
+ )
+ return false
+ }
+
+ /* checking if search result is empty */
+ if len(searchResult.Entries) == 0 {
+ zap.L().Error("User not found in LDAP",
+ zap.String("username", username),
+ zap.Error(err),
+ )
+ return false
+ }
+
+ userDN := searchResult.Entries[0].DN
+
+	/* bind as the user to verify the supplied password */
+ err = l.Bind(userDN, password)
+ if err != nil {
+ zap.L().Error("User authentication failed",
+ zap.String("Username", username),
+ zap.Error(err),
+ )
+ return false
+ }
+
+ /* authentication successful */
+ zap.L().Info("User authentication successful",
+ zap.String("username", username),
+ )
+
+ return true
+}
diff --git a/internal/auth/model.go b/internal/auth/model.go
new file mode 100644
index 0000000..9eae9d5
--- /dev/null
+++ b/internal/auth/model.go
@@ -0,0 +1,7 @@
+package auth
+
+/* username and password */
+type User struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+}
diff --git a/internal/grpcpool/clientpool.go b/internal/grpcpool/clientpool.go
new file mode 100644
index 0000000..478f3e4
--- /dev/null
+++ b/internal/grpcpool/clientpool.go
@@ -0,0 +1,78 @@
+package grpcpool
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc"
+)
+
+/* creates a new client pool */
+func NewClientPool(opts ...grpc.DialOption) *ClientPool {
+ return &ClientPool{
+ conns: make(map[string]*grpc.ClientConn),
+ dialOptions: opts,
+ stopCh: make(chan struct{}),
+ }
+}
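+
+/*
+   example construction (sketch; assumes plaintext gRPC to trusted daemons):
+
+       pool := grpcpool.NewClientPool(grpc.WithTransportCredentials(insecure.NewCredentials()))
+       conn, err := pool.GetConn("daemon-host:50051", errCh)
+
+   insecure.NewCredentials() comes from google.golang.org/grpc/credentials/insecure
+*/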
+
+/*
+	returns a connection to the given server, creating one if needed
+	allows multiple connections to daemons to be kept open for transactions in execution
+*/
+func (p *ClientPool) GetConn(addr string, errCh chan<- error) (*grpc.ClientConn, error) {
+ /* check if connection exists or not */
+ p.mu.RLock()
+ conn, exists := p.conns[addr]
+ p.mu.RUnlock()
+
+ /* return the connection if it exists */
+ if exists {
+ return conn, nil
+ }
+
+	/* the connection doesn't exist, so create a new one */
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+	/* double-check (it might have been created between the check above and this line) */
+ conn, exists = p.conns[addr]
+ if exists {
+ return conn, nil
+ }
+
+ /* create a new client for gRPC server */
+ newConn, err := grpc.NewClient(addr, p.dialOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to add new connection: %w", err)
+ }
+
+ /* add connection to the pool */
+ p.conns[addr] = newConn
+
+ /*
+		in case of connection issues, the connection removes itself from the pool;
+		when it is requested again, the logic above recreates it
+ */
+ go p.MonitorHealth(addr, newConn, errCh)
+
+ /* return connection */
+ return newConn, nil
+}
+
+/*
+ close all connections in the pool
+	call this only while the error channel is still open and being read
+*/
+func (p *ClientPool) CloseAll(errCh chan<- error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ /* iterate over all the connections and attempt to close them all */
+ for _, conn := range p.conns {
+ if err := conn.Close(); err != nil {
+ errCh <- fmt.Errorf("error while closing gRPC connection: %w", err)
+ }
+ }
+
+ p.conns = make(map[string]*grpc.ClientConn)
+}
diff --git a/internal/grpcpool/model.go b/internal/grpcpool/model.go
new file mode 100644
index 0000000..aa08235
--- /dev/null
+++ b/internal/grpcpool/model.go
@@ -0,0 +1,14 @@
+package grpcpool
+
+import (
+ "google.golang.org/grpc"
+ "sync"
+)
+
+/* gRPC connection pool for daemons */
+type ClientPool struct {
+ mu sync.RWMutex
+ conns map[string]*grpc.ClientConn
+ dialOptions []grpc.DialOption
+ stopCh chan struct{}
+}
diff --git a/internal/grpcpool/monitor.go b/internal/grpcpool/monitor.go
new file mode 100644
index 0000000..d077784
--- /dev/null
+++ b/internal/grpcpool/monitor.go
@@ -0,0 +1,45 @@
+package grpcpool
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ pb "github.com/PythonHacker24/linux-acl-management-backend/proto"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+)
+
+/* monitor gRPC connections */
+func (p *ClientPool) MonitorHealth(addr string, conn *grpc.ClientConn, errCh chan<- error) {
+ /* TODO: make it configurable */
+ ticker := time.NewTicker(10 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-p.stopCh:
+ return
+ case <-ticker.C:
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ pingClient := pb.NewPingServiceClient(conn)
+ _, err := pingClient.Ping(ctx, &pb.PingRequest{})
+ cancel()
+
+ if err != nil {
+ errCh <- fmt.Errorf("ping failed for daemon at %s: %w", addr, err)
+
+ p.mu.Lock()
+ conn.Close()
+ delete(p.conns, addr)
+ p.mu.Unlock()
+
+ return
+ } else {
+ zap.L().Info("Ping success",
+ zap.String("Address", addr),
+ )
+ }
+ }
+ }
+}
diff --git a/internal/handlers/handlers.go b/internal/handlers/handlers.go
deleted file mode 100644
index e69de29..0000000
diff --git a/internal/health/handler.go b/internal/health/handler.go
new file mode 100644
index 0000000..426dfe0
--- /dev/null
+++ b/internal/health/handler.go
@@ -0,0 +1,25 @@
+package health
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "go.uber.org/zap"
+)
+
+/* health handler provides status check on the backend server */
+func HealthHandler(w http.ResponseWriter, r *http.Request) {
+ var response HealthResponse
+
+ /* set the content type and write the response */
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+
+ /* set the status to ok */
+ response.Status = "ok"
+ if err := json.NewEncoder(w).Encode(response); err != nil {
+ zap.L().Error("Failed to send health response from the handler",
+ zap.Error(err),
+ )
+ }
+}
diff --git a/internal/health/model.go b/internal/health/model.go
new file mode 100644
index 0000000..4a8f859
--- /dev/null
+++ b/internal/health/model.go
@@ -0,0 +1,6 @@
+package health
+
+/* health response */
+type HealthResponse struct {
+ Status string `json:"status"`
+}
diff --git a/internal/ldap/ldap.go b/internal/ldap/ldap.go
deleted file mode 100644
index e69de29..0000000
diff --git a/internal/models/models.go b/internal/models/models.go
deleted file mode 100644
index e69de29..0000000
diff --git a/internal/postgresql/db.go b/internal/postgresql/db.go
new file mode 100644
index 0000000..41db43d
--- /dev/null
+++ b/internal/postgresql/db.go
@@ -0,0 +1,32 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+
+package postgresql
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+type DBTX interface {
+ Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
+ Query(context.Context, string, ...interface{}) (pgx.Rows, error)
+ QueryRow(context.Context, string, ...interface{}) pgx.Row
+}
+
+func New(db DBTX) *Queries {
+ return &Queries{db: db}
+}
+
+type Queries struct {
+ db DBTX
+}
+
+func (q *Queries) WithTx(tx pgx.Tx) *Queries {
+ return &Queries{
+ db: tx,
+ }
+}
diff --git a/internal/postgresql/models.go b/internal/postgresql/models.go
new file mode 100644
index 0000000..0e6500b
--- /dev/null
+++ b/internal/postgresql/models.go
@@ -0,0 +1,56 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+
+package postgresql
+
+import (
+ "github.com/google/uuid"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+type PendingTransactionsArchive struct {
+ ID uuid.UUID `json:"id"`
+ SessionID uuid.UUID `json:"session_id"`
+ Timestamp pgtype.Timestamptz `json:"timestamp"`
+ Operation string `json:"operation"`
+ TargetPath string `json:"target_path"`
+ Entries []byte `json:"entries"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ ExecutedBy string `json:"executed_by"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+ CreatedAt pgtype.Timestamptz `json:"created_at"`
+}
+
+type ResultsTransactionsArchive struct {
+ ID uuid.UUID `json:"id"`
+ SessionID uuid.UUID `json:"session_id"`
+ Timestamp pgtype.Timestamptz `json:"timestamp"`
+ Operation string `json:"operation"`
+ TargetPath string `json:"target_path"`
+ Entries []byte `json:"entries"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ ExecutedBy string `json:"executed_by"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+ CreatedAt pgtype.Timestamptz `json:"created_at"`
+}
+
+type SessionsArchive struct {
+ ID uuid.UUID `json:"id"`
+ Username string `json:"username"`
+ Ip pgtype.Text `json:"ip"`
+ UserAgent pgtype.Text `json:"user_agent"`
+ Status string `json:"status"`
+ CreatedAt pgtype.Timestamp `json:"created_at"`
+ LastActiveAt pgtype.Timestamp `json:"last_active_at"`
+ Expiry pgtype.Timestamp `json:"expiry"`
+ CompletedCount pgtype.Int4 `json:"completed_count"`
+ FailedCount pgtype.Int4 `json:"failed_count"`
+ ArchivedAt pgtype.Timestamp `json:"archived_at"`
+}
diff --git a/internal/postgresql/pending_transactions.sql.go b/internal/postgresql/pending_transactions.sql.go
new file mode 100644
index 0000000..90a97b7
--- /dev/null
+++ b/internal/postgresql/pending_transactions.sql.go
@@ -0,0 +1,449 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+// source: pending_transactions.sql
+
+package postgresql
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+const countPendingTransactionsByOperationPQ = `-- name: CountPendingTransactionsByOperationPQ :one
+SELECT COUNT(*) FROM pending_transactions_archive
+WHERE session_id = $1 AND operation = $2
+`
+
+type CountPendingTransactionsByOperationPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Operation string `json:"operation"`
+}
+
+func (q *Queries) CountPendingTransactionsByOperationPQ(ctx context.Context, arg CountPendingTransactionsByOperationPQParams) (int64, error) {
+ row := q.db.QueryRow(ctx, countPendingTransactionsByOperationPQ, arg.SessionID, arg.Operation)
+ var count int64
+ err := row.Scan(&count)
+ return count, err
+}
+
+const countPendingTransactionsByStatusPQ = `-- name: CountPendingTransactionsByStatusPQ :one
+SELECT COUNT(*) FROM pending_transactions_archive
+WHERE session_id = $1 AND status = $2
+`
+
+type CountPendingTransactionsByStatusPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Status string `json:"status"`
+}
+
+func (q *Queries) CountPendingTransactionsByStatusPQ(ctx context.Context, arg CountPendingTransactionsByStatusPQParams) (int64, error) {
+ row := q.db.QueryRow(ctx, countPendingTransactionsByStatusPQ, arg.SessionID, arg.Status)
+ var count int64
+ err := row.Scan(&count)
+ return count, err
+}
+
+const createPendingTransactionPQ = `-- name: CreatePendingTransactionPQ :one
+INSERT INTO pending_transactions_archive (
+ id,
+ session_id,
+ timestamp,
+ operation,
+ target_path,
+ entries,
+ status,
+ error_msg,
+ output,
+ executed_by,
+ duration_ms,
+ ExecStatus
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
+) RETURNING id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at
+`
+
+type CreatePendingTransactionPQParams struct {
+ ID uuid.UUID `json:"id"`
+ SessionID uuid.UUID `json:"session_id"`
+ Timestamp pgtype.Timestamptz `json:"timestamp"`
+ Operation string `json:"operation"`
+ TargetPath string `json:"target_path"`
+ Entries []byte `json:"entries"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ ExecutedBy string `json:"executed_by"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+}
+
+func (q *Queries) CreatePendingTransactionPQ(ctx context.Context, arg CreatePendingTransactionPQParams) (PendingTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, createPendingTransactionPQ,
+ arg.ID,
+ arg.SessionID,
+ arg.Timestamp,
+ arg.Operation,
+ arg.TargetPath,
+ arg.Entries,
+ arg.Status,
+ arg.ErrorMsg,
+ arg.Output,
+ arg.ExecutedBy,
+ arg.DurationMs,
+ arg.Execstatus,
+ )
+ var i PendingTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const deletePendingTransactionPQ = `-- name: DeletePendingTransactionPQ :exec
+DELETE FROM pending_transactions_archive
+WHERE id = $1
+`
+
+func (q *Queries) DeletePendingTransactionPQ(ctx context.Context, id uuid.UUID) error {
+ _, err := q.db.Exec(ctx, deletePendingTransactionPQ, id)
+ return err
+}
+
+const deletePendingTransactionsBySessionPQ = `-- name: DeletePendingTransactionsBySessionPQ :exec
+DELETE FROM pending_transactions_archive
+WHERE session_id = $1
+`
+
+func (q *Queries) DeletePendingTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) error {
+ _, err := q.db.Exec(ctx, deletePendingTransactionsBySessionPQ, sessionID)
+ return err
+}
+
+const getPendingTransactionPQ = `-- name: GetPendingTransactionPQ :one
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE id = $1
+`
+
+func (q *Queries) GetPendingTransactionPQ(ctx context.Context, id uuid.UUID) (PendingTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, getPendingTransactionPQ, id)
+ var i PendingTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const getPendingTransactionStatsPQ = `-- name: GetPendingTransactionStatsPQ :one
+SELECT
+ COUNT(*) as total_transactions,
+ COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_transactions,
+ AVG(duration_ms) as avg_duration_ms
+FROM pending_transactions_archive
+WHERE session_id = $1
+`
+
+type GetPendingTransactionStatsPQRow struct {
+ TotalTransactions int64 `json:"total_transactions"`
+ PendingTransactions int64 `json:"pending_transactions"`
+ AvgDurationMs float64 `json:"avg_duration_ms"`
+}
+
+func (q *Queries) GetPendingTransactionStatsPQ(ctx context.Context, sessionID uuid.UUID) (GetPendingTransactionStatsPQRow, error) {
+ row := q.db.QueryRow(ctx, getPendingTransactionStatsPQ, sessionID)
+ var i GetPendingTransactionStatsPQRow
+ err := row.Scan(&i.TotalTransactions, &i.PendingTransactions, &i.AvgDurationMs)
+ return i, err
+}
+
+const getPendingTransactionsByOperationPQ = `-- name: GetPendingTransactionsByOperationPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE session_id = $1 AND operation = $2
+ORDER BY timestamp DESC
+`
+
+type GetPendingTransactionsByOperationPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Operation string `json:"operation"`
+}
+
+func (q *Queries) GetPendingTransactionsByOperationPQ(ctx context.Context, arg GetPendingTransactionsByOperationPQParams) ([]PendingTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getPendingTransactionsByOperationPQ, arg.SessionID, arg.Operation)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []PendingTransactionsArchive{}
+ for rows.Next() {
+ var i PendingTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getPendingTransactionsByPathPQ = `-- name: GetPendingTransactionsByPathPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE session_id = $1 AND target_path = $2
+ORDER BY timestamp DESC
+`
+
+type GetPendingTransactionsByPathPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ TargetPath string `json:"target_path"`
+}
+
+func (q *Queries) GetPendingTransactionsByPathPQ(ctx context.Context, arg GetPendingTransactionsByPathPQParams) ([]PendingTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getPendingTransactionsByPathPQ, arg.SessionID, arg.TargetPath)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []PendingTransactionsArchive{}
+ for rows.Next() {
+ var i PendingTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getPendingTransactionsBySessionPQ = `-- name: GetPendingTransactionsBySessionPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE session_id = $1
+ORDER BY timestamp DESC
+`
+
+func (q *Queries) GetPendingTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) ([]PendingTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getPendingTransactionsBySessionPQ, sessionID)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []PendingTransactionsArchive{}
+ for rows.Next() {
+ var i PendingTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getPendingTransactionsByUserPaginatedPQ = `-- name: GetPendingTransactionsByUserPaginatedPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE executed_by = $1
+ORDER BY timestamp DESC
+LIMIT $2 OFFSET $3
+`
+
+type GetPendingTransactionsByUserPaginatedPQParams struct {
+ ExecutedBy string `json:"executed_by"`
+ Limit int32 `json:"limit"`
+ Offset int32 `json:"offset"`
+}
+
+func (q *Queries) GetPendingTransactionsByUserPaginatedPQ(ctx context.Context, arg GetPendingTransactionsByUserPaginatedPQParams) ([]PendingTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getPendingTransactionsByUserPaginatedPQ, arg.ExecutedBy, arg.Limit, arg.Offset)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []PendingTransactionsArchive{}
+ for rows.Next() {
+ var i PendingTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getPendingTransactionsPQ = `-- name: GetPendingTransactionsPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM pending_transactions_archive
+WHERE session_id = $1 AND status = 'pending'
+ORDER BY timestamp DESC
+`
+
+func (q *Queries) GetPendingTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]PendingTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getPendingTransactionsPQ, sessionID)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []PendingTransactionsArchive{}
+ for rows.Next() {
+ var i PendingTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const updatePendingTransactionStatusPQ = `-- name: UpdatePendingTransactionStatusPQ :one
+UPDATE pending_transactions_archive
+SET
+ status = $2,
+ error_msg = $3,
+ output = $4,
+ duration_ms = $5,
+ ExecStatus = $6
+WHERE id = $1
+RETURNING id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at
+`
+
+type UpdatePendingTransactionStatusPQParams struct {
+ ID uuid.UUID `json:"id"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+}
+
+func (q *Queries) UpdatePendingTransactionStatusPQ(ctx context.Context, arg UpdatePendingTransactionStatusPQParams) (PendingTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, updatePendingTransactionStatusPQ,
+ arg.ID,
+ arg.Status,
+ arg.ErrorMsg,
+ arg.Output,
+ arg.DurationMs,
+ arg.Execstatus,
+ )
+ var i PendingTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
diff --git a/internal/postgresql/querier.go b/internal/postgresql/querier.go
new file mode 100644
index 0000000..e32d64e
--- /dev/null
+++ b/internal/postgresql/querier.go
@@ -0,0 +1,47 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+
+package postgresql
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+)
+
+type Querier interface {
+ CountPendingTransactionsByOperationPQ(ctx context.Context, arg CountPendingTransactionsByOperationPQParams) (int64, error)
+ CountPendingTransactionsByStatusPQ(ctx context.Context, arg CountPendingTransactionsByStatusPQParams) (int64, error)
+ CountResultsTransactionsByOperationPQ(ctx context.Context, arg CountResultsTransactionsByOperationPQParams) (int64, error)
+ CountResultsTransactionsByStatusPQ(ctx context.Context, arg CountResultsTransactionsByStatusPQParams) (int64, error)
+ CreatePendingTransactionPQ(ctx context.Context, arg CreatePendingTransactionPQParams) (PendingTransactionsArchive, error)
+ CreateResultsTransactionPQ(ctx context.Context, arg CreateResultsTransactionPQParams) (ResultsTransactionsArchive, error)
+ DeletePendingTransactionPQ(ctx context.Context, id uuid.UUID) error
+ DeletePendingTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) error
+ DeleteResultsTransactionPQ(ctx context.Context, id uuid.UUID) error
+ DeleteResultsTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) error
+ DeleteSessionPQ(ctx context.Context, id uuid.UUID) error
+ GetFailedResultsTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error)
+ GetPendingTransactionPQ(ctx context.Context, id uuid.UUID) (PendingTransactionsArchive, error)
+ GetPendingTransactionStatsPQ(ctx context.Context, sessionID uuid.UUID) (GetPendingTransactionStatsPQRow, error)
+ GetPendingTransactionsByOperationPQ(ctx context.Context, arg GetPendingTransactionsByOperationPQParams) ([]PendingTransactionsArchive, error)
+ GetPendingTransactionsByPathPQ(ctx context.Context, arg GetPendingTransactionsByPathPQParams) ([]PendingTransactionsArchive, error)
+ GetPendingTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) ([]PendingTransactionsArchive, error)
+ GetPendingTransactionsByUserPaginatedPQ(ctx context.Context, arg GetPendingTransactionsByUserPaginatedPQParams) ([]PendingTransactionsArchive, error)
+ GetPendingTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]PendingTransactionsArchive, error)
+ GetResultsTransactionPQ(ctx context.Context, id uuid.UUID) (ResultsTransactionsArchive, error)
+ GetResultsTransactionStatsPQ(ctx context.Context, sessionID uuid.UUID) (GetResultsTransactionStatsPQRow, error)
+ GetResultsTransactionsByOperationPQ(ctx context.Context, arg GetResultsTransactionsByOperationPQParams) ([]ResultsTransactionsArchive, error)
+ GetResultsTransactionsByPathPQ(ctx context.Context, arg GetResultsTransactionsByPathPQParams) ([]ResultsTransactionsArchive, error)
+ GetResultsTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error)
+ GetResultsTransactionsByUserPaginatedPQ(ctx context.Context, arg GetResultsTransactionsByUserPaginatedPQParams) ([]ResultsTransactionsArchive, error)
+ GetSessionByUsernamePaginatedPQ(ctx context.Context, arg GetSessionByUsernamePaginatedPQParams) ([]SessionsArchive, error)
+ GetSessionPQ(ctx context.Context, id uuid.UUID) (SessionsArchive, error)
+ GetSuccessfulResultsTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error)
+ StoreSessionPQ(ctx context.Context, arg StoreSessionPQParams) (SessionsArchive, error)
+ UpdatePendingTransactionStatusPQ(ctx context.Context, arg UpdatePendingTransactionStatusPQParams) (PendingTransactionsArchive, error)
+ UpdateResultsTransactionStatusPQ(ctx context.Context, arg UpdateResultsTransactionStatusPQParams) (ResultsTransactionsArchive, error)
+}
+
+var _ Querier = (*Queries)(nil)
diff --git a/internal/postgresql/results_transactions.sql.go b/internal/postgresql/results_transactions.sql.go
new file mode 100644
index 0000000..9170881
--- /dev/null
+++ b/internal/postgresql/results_transactions.sql.go
@@ -0,0 +1,496 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+// source: results_transactions.sql
+
+package postgresql
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+const countResultsTransactionsByOperationPQ = `-- name: CountResultsTransactionsByOperationPQ :one
+SELECT COUNT(*) FROM results_transactions_archive
+WHERE session_id = $1 AND operation = $2
+`
+
+type CountResultsTransactionsByOperationPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Operation string `json:"operation"`
+}
+
+func (q *Queries) CountResultsTransactionsByOperationPQ(ctx context.Context, arg CountResultsTransactionsByOperationPQParams) (int64, error) {
+ row := q.db.QueryRow(ctx, countResultsTransactionsByOperationPQ, arg.SessionID, arg.Operation)
+ var count int64
+ err := row.Scan(&count)
+ return count, err
+}
+
+const countResultsTransactionsByStatusPQ = `-- name: CountResultsTransactionsByStatusPQ :one
+SELECT COUNT(*) FROM results_transactions_archive
+WHERE session_id = $1 AND status = $2
+`
+
+type CountResultsTransactionsByStatusPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Status string `json:"status"`
+}
+
+func (q *Queries) CountResultsTransactionsByStatusPQ(ctx context.Context, arg CountResultsTransactionsByStatusPQParams) (int64, error) {
+ row := q.db.QueryRow(ctx, countResultsTransactionsByStatusPQ, arg.SessionID, arg.Status)
+ var count int64
+ err := row.Scan(&count)
+ return count, err
+}
+
+const createResultsTransactionPQ = `-- name: CreateResultsTransactionPQ :one
+INSERT INTO results_transactions_archive (
+ id,
+ session_id,
+ timestamp,
+ operation,
+ target_path,
+ entries,
+ status,
+ error_msg,
+ output,
+ executed_by,
+ duration_ms,
+ ExecStatus
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
+) RETURNING id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at
+`
+
+type CreateResultsTransactionPQParams struct {
+ ID uuid.UUID `json:"id"`
+ SessionID uuid.UUID `json:"session_id"`
+ Timestamp pgtype.Timestamptz `json:"timestamp"`
+ Operation string `json:"operation"`
+ TargetPath string `json:"target_path"`
+ Entries []byte `json:"entries"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ ExecutedBy string `json:"executed_by"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+}
+
+func (q *Queries) CreateResultsTransactionPQ(ctx context.Context, arg CreateResultsTransactionPQParams) (ResultsTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, createResultsTransactionPQ,
+ arg.ID,
+ arg.SessionID,
+ arg.Timestamp,
+ arg.Operation,
+ arg.TargetPath,
+ arg.Entries,
+ arg.Status,
+ arg.ErrorMsg,
+ arg.Output,
+ arg.ExecutedBy,
+ arg.DurationMs,
+ arg.Execstatus,
+ )
+ var i ResultsTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const deleteResultsTransactionPQ = `-- name: DeleteResultsTransactionPQ :exec
+DELETE FROM results_transactions_archive
+WHERE id = $1
+`
+
+func (q *Queries) DeleteResultsTransactionPQ(ctx context.Context, id uuid.UUID) error {
+ _, err := q.db.Exec(ctx, deleteResultsTransactionPQ, id)
+ return err
+}
+
+const deleteResultsTransactionsBySessionPQ = `-- name: DeleteResultsTransactionsBySessionPQ :exec
+DELETE FROM results_transactions_archive
+WHERE session_id = $1
+`
+
+func (q *Queries) DeleteResultsTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) error {
+ _, err := q.db.Exec(ctx, deleteResultsTransactionsBySessionPQ, sessionID)
+ return err
+}
+
+const getFailedResultsTransactionsPQ = `-- name: GetFailedResultsTransactionsPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE session_id = $1 AND status = 'failed'
+ORDER BY timestamp DESC
+`
+
+func (q *Queries) GetFailedResultsTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getFailedResultsTransactionsPQ, sessionID)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getResultsTransactionPQ = `-- name: GetResultsTransactionPQ :one
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE id = $1
+`
+
+func (q *Queries) GetResultsTransactionPQ(ctx context.Context, id uuid.UUID) (ResultsTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, getResultsTransactionPQ, id)
+ var i ResultsTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
+
+const getResultsTransactionStatsPQ = `-- name: GetResultsTransactionStatsPQ :one
+SELECT
+ COUNT(*) as total_transactions,
+ COUNT(CASE WHEN status = 'success' THEN 1 END) as successful_transactions,
+ COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_transactions,
+ AVG(duration_ms) as avg_duration_ms
+FROM results_transactions_archive
+WHERE session_id = $1
+`
+
+type GetResultsTransactionStatsPQRow struct {
+ TotalTransactions int64 `json:"total_transactions"`
+ SuccessfulTransactions int64 `json:"successful_transactions"`
+ FailedTransactions int64 `json:"failed_transactions"`
+ AvgDurationMs float64 `json:"avg_duration_ms"`
+}
+
+func (q *Queries) GetResultsTransactionStatsPQ(ctx context.Context, sessionID uuid.UUID) (GetResultsTransactionStatsPQRow, error) {
+ row := q.db.QueryRow(ctx, getResultsTransactionStatsPQ, sessionID)
+ var i GetResultsTransactionStatsPQRow
+ err := row.Scan(
+ &i.TotalTransactions,
+ &i.SuccessfulTransactions,
+ &i.FailedTransactions,
+ &i.AvgDurationMs,
+ )
+ return i, err
+}
+
+const getResultsTransactionsByOperationPQ = `-- name: GetResultsTransactionsByOperationPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE session_id = $1 AND operation = $2
+ORDER BY timestamp DESC
+`
+
+type GetResultsTransactionsByOperationPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ Operation string `json:"operation"`
+}
+
+func (q *Queries) GetResultsTransactionsByOperationPQ(ctx context.Context, arg GetResultsTransactionsByOperationPQParams) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getResultsTransactionsByOperationPQ, arg.SessionID, arg.Operation)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getResultsTransactionsByPathPQ = `-- name: GetResultsTransactionsByPathPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE session_id = $1 AND target_path = $2
+ORDER BY timestamp DESC
+`
+
+type GetResultsTransactionsByPathPQParams struct {
+ SessionID uuid.UUID `json:"session_id"`
+ TargetPath string `json:"target_path"`
+}
+
+func (q *Queries) GetResultsTransactionsByPathPQ(ctx context.Context, arg GetResultsTransactionsByPathPQParams) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getResultsTransactionsByPathPQ, arg.SessionID, arg.TargetPath)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getResultsTransactionsBySessionPQ = `-- name: GetResultsTransactionsBySessionPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE session_id = $1
+ORDER BY timestamp DESC
+`
+
+func (q *Queries) GetResultsTransactionsBySessionPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getResultsTransactionsBySessionPQ, sessionID)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getResultsTransactionsByUserPaginatedPQ = `-- name: GetResultsTransactionsByUserPaginatedPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE executed_by = $1
+ORDER BY timestamp DESC
+LIMIT $2 OFFSET $3
+`
+
+type GetResultsTransactionsByUserPaginatedPQParams struct {
+ ExecutedBy string `json:"executed_by"`
+ Limit int32 `json:"limit"`
+ Offset int32 `json:"offset"`
+}
+
+func (q *Queries) GetResultsTransactionsByUserPaginatedPQ(ctx context.Context, arg GetResultsTransactionsByUserPaginatedPQParams) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getResultsTransactionsByUserPaginatedPQ, arg.ExecutedBy, arg.Limit, arg.Offset)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getSuccessfulResultsTransactionsPQ = `-- name: GetSuccessfulResultsTransactionsPQ :many
+SELECT id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at FROM results_transactions_archive
+WHERE session_id = $1 AND status = 'success'
+ORDER BY timestamp DESC
+`
+
+func (q *Queries) GetSuccessfulResultsTransactionsPQ(ctx context.Context, sessionID uuid.UUID) ([]ResultsTransactionsArchive, error) {
+ rows, err := q.db.Query(ctx, getSuccessfulResultsTransactionsPQ, sessionID)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []ResultsTransactionsArchive{}
+ for rows.Next() {
+ var i ResultsTransactionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const updateResultsTransactionStatusPQ = `-- name: UpdateResultsTransactionStatusPQ :one
+UPDATE results_transactions_archive
+SET
+ status = $2,
+ error_msg = $3,
+ output = $4,
+ duration_ms = $5,
+ ExecStatus = $6
+WHERE id = $1
+RETURNING id, session_id, timestamp, operation, target_path, entries, status, error_msg, output, executed_by, duration_ms, execstatus, created_at
+`
+
+type UpdateResultsTransactionStatusPQParams struct {
+ ID uuid.UUID `json:"id"`
+ Status string `json:"status"`
+ ErrorMsg pgtype.Text `json:"error_msg"`
+ Output pgtype.Text `json:"output"`
+ DurationMs pgtype.Int8 `json:"duration_ms"`
+ Execstatus bool `json:"execstatus"`
+}
+
+func (q *Queries) UpdateResultsTransactionStatusPQ(ctx context.Context, arg UpdateResultsTransactionStatusPQParams) (ResultsTransactionsArchive, error) {
+ row := q.db.QueryRow(ctx, updateResultsTransactionStatusPQ,
+ arg.ID,
+ arg.Status,
+ arg.ErrorMsg,
+ arg.Output,
+ arg.DurationMs,
+ arg.Execstatus,
+ )
+ var i ResultsTransactionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.SessionID,
+ &i.Timestamp,
+ &i.Operation,
+ &i.TargetPath,
+ &i.Entries,
+ &i.Status,
+ &i.ErrorMsg,
+ &i.Output,
+ &i.ExecutedBy,
+ &i.DurationMs,
+ &i.Execstatus,
+ &i.CreatedAt,
+ )
+ return i, err
+}
diff --git a/internal/postgresql/sessions.sql.go b/internal/postgresql/sessions.sql.go
new file mode 100644
index 0000000..1800dbc
--- /dev/null
+++ b/internal/postgresql/sessions.sql.go
@@ -0,0 +1,144 @@
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.29.0
+// source: sessions.sql
+
+package postgresql
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+const deleteSessionPQ = `-- name: DeleteSessionPQ :exec
+DELETE FROM sessions_archive WHERE id = $1
+`
+
+func (q *Queries) DeleteSessionPQ(ctx context.Context, id uuid.UUID) error {
+ _, err := q.db.Exec(ctx, deleteSessionPQ, id)
+ return err
+}
+
+const getSessionByUsernamePaginatedPQ = `-- name: GetSessionByUsernamePaginatedPQ :many
+SELECT id, username, ip, user_agent, status, created_at, last_active_at, expiry, completed_count, failed_count, archived_at FROM sessions_archive
+WHERE username = $1
+ORDER BY created_at DESC
+LIMIT $2 OFFSET $3
+`
+
+type GetSessionByUsernamePaginatedPQParams struct {
+ Username string `json:"username"`
+ Limit int32 `json:"limit"`
+ Offset int32 `json:"offset"`
+}
+
+func (q *Queries) GetSessionByUsernamePaginatedPQ(ctx context.Context, arg GetSessionByUsernamePaginatedPQParams) ([]SessionsArchive, error) {
+ rows, err := q.db.Query(ctx, getSessionByUsernamePaginatedPQ, arg.Username, arg.Limit, arg.Offset)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ items := []SessionsArchive{}
+ for rows.Next() {
+ var i SessionsArchive
+ if err := rows.Scan(
+ &i.ID,
+ &i.Username,
+ &i.Ip,
+ &i.UserAgent,
+ &i.Status,
+ &i.CreatedAt,
+ &i.LastActiveAt,
+ &i.Expiry,
+ &i.CompletedCount,
+ &i.FailedCount,
+ &i.ArchivedAt,
+ ); err != nil {
+ return nil, err
+ }
+ items = append(items, i)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return items, nil
+}
+
+const getSessionPQ = `-- name: GetSessionPQ :one
+SELECT id, username, ip, user_agent, status, created_at, last_active_at, expiry, completed_count, failed_count, archived_at FROM sessions_archive
+WHERE id = $1
+`
+
+func (q *Queries) GetSessionPQ(ctx context.Context, id uuid.UUID) (SessionsArchive, error) {
+ row := q.db.QueryRow(ctx, getSessionPQ, id)
+ var i SessionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.Username,
+ &i.Ip,
+ &i.UserAgent,
+ &i.Status,
+ &i.CreatedAt,
+ &i.LastActiveAt,
+ &i.Expiry,
+ &i.CompletedCount,
+ &i.FailedCount,
+ &i.ArchivedAt,
+ )
+ return i, err
+}
+
+const storeSessionPQ = `-- name: StoreSessionPQ :one
+INSERT INTO sessions_archive (
+ id, username, ip, user_agent, status,
+ created_at, last_active_at, expiry,
+ completed_count, failed_count
+) VALUES (
+ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10
+) RETURNING id, username, ip, user_agent, status, created_at, last_active_at, expiry, completed_count, failed_count, archived_at
+`
+
+type StoreSessionPQParams struct {
+ ID uuid.UUID `json:"id"`
+ Username string `json:"username"`
+ Ip pgtype.Text `json:"ip"`
+ UserAgent pgtype.Text `json:"user_agent"`
+ Status string `json:"status"`
+ CreatedAt pgtype.Timestamp `json:"created_at"`
+ LastActiveAt pgtype.Timestamp `json:"last_active_at"`
+ Expiry pgtype.Timestamp `json:"expiry"`
+ CompletedCount pgtype.Int4 `json:"completed_count"`
+ FailedCount pgtype.Int4 `json:"failed_count"`
+}
+
+func (q *Queries) StoreSessionPQ(ctx context.Context, arg StoreSessionPQParams) (SessionsArchive, error) {
+ row := q.db.QueryRow(ctx, storeSessionPQ,
+ arg.ID,
+ arg.Username,
+ arg.Ip,
+ arg.UserAgent,
+ arg.Status,
+ arg.CreatedAt,
+ arg.LastActiveAt,
+ arg.Expiry,
+ arg.CompletedCount,
+ arg.FailedCount,
+ )
+ var i SessionsArchive
+ err := row.Scan(
+ &i.ID,
+ &i.Username,
+ &i.Ip,
+ &i.UserAgent,
+ &i.Status,
+ &i.CreatedAt,
+ &i.LastActiveAt,
+ &i.Expiry,
+ &i.CompletedCount,
+ &i.FailedCount,
+ &i.ArchivedAt,
+ )
+ return i, err
+}
diff --git a/internal/redis/redis.go b/internal/redis/redis.go
new file mode 100644
index 0000000..d9aae38
--- /dev/null
+++ b/internal/redis/redis.go
@@ -0,0 +1,113 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/redis/go-redis/v9"
+)
+
+/* defines the methods to expose (for dependency injection) */
+type RedisClient interface {
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) error
+ Get(ctx context.Context, key string) (string, error)
+ Del(ctx context.Context, keys ...string) *redis.IntCmd
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *redis.ScanCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *redis.IntCmd
+ HDel(ctx context.Context, key string, fields ...string) *redis.IntCmd
+ RPush(ctx context.Context, key string, value interface{}) *redis.IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *redis.StringSliceCmd
+ PSubscribe(ctx context.Context, patterns ...string) (*redis.PubSub, error)
+ HGetAll(ctx context.Context, key string) *redis.MapStringStringCmd
+ FlushAll(ctx context.Context) error
+ HIncrBy(ctx context.Context, key, field string, incr int64) *redis.IntCmd
+}
+
+/* redisClient implementation */
+type redisClient struct {
+ client *redis.Client
+}
+
+/* for creating a new redis client */
+func NewRedisClient(address, password string, db int) (RedisClient, error) {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: address,
+ Password: password,
+ DB: db,
+ })
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ if err := rdb.Ping(ctx).Err(); err != nil {
+ return nil, fmt.Errorf("could not connect to Redis: %w", err)
+ }
+
+ return &redisClient{client: rdb}, nil
+}
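+
+/*
+   Usage sketch (illustrative only; the address, password, and DB number are
+   assumptions, not part of this change):
+
+       ctx := context.Background()
+       rdb, err := NewRedisClient("localhost:6379", "", 0)
+       if err != nil {
+           // handle connection error
+       }
+       if err := rdb.Set(ctx, "greeting", "hello", time.Minute); err != nil {
+           // handle write error
+       }
+       val, err := rdb.Get(ctx, "greeting") // val == "hello"
+*/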
+
+/* Set sets a key-value pair in Redis */
+func (r *redisClient) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) error {
+ return r.client.Set(ctx, key, value, expiration).Err()
+}
+
+/* retrieves a value by key from Redis */
+func (r *redisClient) Get(ctx context.Context, key string) (string, error) {
+ return r.client.Get(ctx, key).Result()
+}
+
+/* deletes a redis entry */
+func (r *redisClient) Del(ctx context.Context, keys ...string) *redis.IntCmd {
+ return r.client.Del(ctx, keys...)
+}
+
+/* scan for redis */
+func (r *redisClient) Scan(ctx context.Context, cursor uint64, match string, count int64) *redis.ScanCmd {
+ return r.client.Scan(ctx, cursor, match, count)
+}
+
+/* for pushing multiple elements in Redis */
+func (r *redisClient) RPush(ctx context.Context, key string, value interface{}) *redis.IntCmd {
+ return r.client.RPush(ctx, key, value)
+}
+
+/* retrieve a subset of the list stored at a specified key */
+func (r *redisClient) LRange(ctx context.Context, key string, start, stop int64) *redis.StringSliceCmd {
+ return r.client.LRange(ctx, key, start, stop)
+}
+
+/* hash set for redis */
+func (r *redisClient) HSet(ctx context.Context, key string, values ...interface{}) *redis.IntCmd {
+ return r.client.HSet(ctx, key, values...)
+}
+
+/* hash delete for redis */
+func (r *redisClient) HDel(ctx context.Context, key string, fields ...string) *redis.IntCmd {
+ return r.client.HDel(ctx, key, fields...)
+}
+
+/* subscribe to redis keyspace notifications */
+func (r *redisClient) PSubscribe(ctx context.Context, patterns ...string) (*redis.PubSub, error) {
+ pubsub := r.client.PSubscribe(ctx, patterns...)
+ _, err := pubsub.Receive(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to subscribe to patterns: %w", err)
+ }
+ return pubsub, nil
+}
+
+/* hash get all the data associated with the key */
+func (r *redisClient) HGetAll(ctx context.Context, key string) *redis.MapStringStringCmd {
+ return r.client.HGetAll(ctx, key)
+}
+
+/* flush all data from Redis */
+func (r *redisClient) FlushAll(ctx context.Context) error {
+ return r.client.FlushAll(ctx).Err()
+}
+
+/* increment the value of the field in the hash */
+func (r *redisClient) HIncrBy(ctx context.Context, key, field string, incr int64) *redis.IntCmd {
+ return r.client.HIncrBy(ctx, key, field, incr)
+}
diff --git a/internal/scheduler/fcfs/fcfs.go b/internal/scheduler/fcfs/fcfs.go
new file mode 100644
index 0000000..db863f1
--- /dev/null
+++ b/internal/scheduler/fcfs/fcfs.go
@@ -0,0 +1,130 @@
+package fcfs
+
+import (
+ "context"
+ "runtime"
+ "time"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/transprocessor"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* spawns a new FCFS scheduler */
+func NewFCFSScheduler(sm *session.Manager, processor transprocessor.TransactionProcessor) *FCFSScheduler {
+ /* calculate max workers */
+ maxProcs := runtime.GOMAXPROCS(0)
+ maxWorkers := config.BackendConfig.AppInfo.MaxWorkers
+
+ /*
+ in case maxWorkers is set to a value less than or equal to 0,
+ use 75% of GOMAXPROCS to avoid starving other processes
+ */
+ if maxWorkers <= 0 {
+ maxWorkers = int(float64(maxProcs) * 0.75)
+ }
+
+ /* Prevent over-allocation */
+ if maxWorkers > maxProcs {
+ maxWorkers = maxProcs
+ }
+
+ return &FCFSScheduler{
+ curSessionManager: sm,
+ maxWorkers: maxWorkers,
+ semaphore: make(chan struct{}, maxWorkers),
+ processor: processor,
+ }
+}
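+
+/*
+   Worked example of the worker calculation above (illustrative only): with
+   GOMAXPROCS(0) == 8 and MaxWorkers set to 0 in the config, the fallback gives
+   int(8 * 0.75) == 6 workers; with MaxWorkers set to 32 it is capped back down
+   to 8, the number of available processors.
+*/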
+
+/* run the fcfs scheduler with context */
+func (f *FCFSScheduler) Run(ctx context.Context) error {
+ for {
+ select {
+
+ /* check if ctx is done - reached when the default branch is not busy (idle scheduler) */
+ case <-ctx.Done():
+ return nil
+
+ /* in case the default branch is busy - ctx is passed down so the work can attempt to quit */
+ default:
+ /* RULE: ctx is propagated all over the coming functions */
+
+ /* get next session in the queue (round robin manner) */
+ curSession := f.curSessionManager.GetNextSession()
+ if curSession == nil {
+ /* might need a delay of 10 ms */
+ continue
+ }
+
+ /* check if transaction queue of the session is empty */
+ curSession.Mutex.Lock()
+ if curSession.TransactionQueue.Len() == 0 {
+ curSession.Mutex.Unlock()
+ continue
+ }
+
+ /* get a transaction from the session to process */
+ transaction := curSession.TransactionQueue.Remove(curSession.TransactionQueue.Front()).(*types.Transaction)
+ curSession.Mutex.Unlock()
+
+ /* block if all workers are busy */
+ f.semaphore <- struct{}{}
+
+ /* go routine is available to be spawned */
+ go func(curSession *session.Session, transaction *types.Transaction) {
+ /* defer clearing the semaphore channel */
+ defer func() { <-f.semaphore }()
+
+ /*
+ process the transaction
+ * processTransaction handles transaction processing completely
+ * it is now responsible for executing it
+ * role of scheduler in handling transactions ends here
+ */
+ if err := f.processor.Process(ctx, curSession, transaction); err != nil {
+ zap.L().Error("Failed to process transaction",
+ zap.Error(err),
+ )
+ }
+
+ /* assume the transaction has been processed; Redis is updated below */
+ transaction.Status = types.StatusSuccess
+
+ /* update duration of transaction execution */
+ elapsed := time.Since(transaction.Timestamp)
+ transaction.DurationMs = elapsed.Milliseconds()
+
+ /* this whole code snippet should be called "Update Session State after transaction execution" */
+ /* update the session's completed/failed count */
+ curSession.Mutex.Lock()
+ if transaction.ExecStatus {
+ curSession.CompletedCount++
+ if err := f.curSessionManager.IncrementSessionCompletedRedis(curSession); err != nil {
+ zap.L().Error("Failed to increment completed session in Redis")
+ }
+ } else {
+ curSession.FailedCount++
+ if err := f.curSessionManager.IncrementSessionFailedRedis(curSession); err != nil {
+ zap.L().Error("Failed to increment failed session in Redis")
+ }
+ }
+ curSession.Mutex.Unlock()
+
+ /* store the result of processed transaction into Redis */
+ if err := f.curSessionManager.SaveTransactionRedisList(curSession, transaction, "txresults"); err != nil {
+ zap.L().Error("Failed to store processed transaction into Redis")
+ }
+
+ /* remove the transaction as pending from Redis */
+ if err := f.curSessionManager.RemovePendingTransaction(curSession, transaction.ID); err != nil {
+ zap.L().Error("Failed to remove pending transaction from Redis")
+ }
+
+ }(curSession, transaction)
+ }
+ }
+}
diff --git a/internal/scheduler/fcfs/model.go b/internal/scheduler/fcfs/model.go
new file mode 100644
index 0000000..d7e66dd
--- /dev/null
+++ b/internal/scheduler/fcfs/model.go
@@ -0,0 +1,22 @@
+package fcfs
+
+import (
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/transprocessor"
+)
+
+/*
+ Notes: the scheduler structure is highly modular
+ Docs must explain how to replace one scheduler module with another,
+ including installing a prebuilt module or developing a new one
+*/
+
+/* FCFS scheduler attached to a session.Manager */
+type FCFSScheduler struct {
+ curSessionManager *session.Manager
+ maxWorkers int
+
+ /* for limiting spawning of goroutines */
+ semaphore chan struct{}
+ processor transprocessor.TransactionProcessor
+}
diff --git a/internal/scheduler/fcfs/utils.go b/internal/scheduler/fcfs/utils.go
new file mode 100644
index 0000000..9408843
--- /dev/null
+++ b/internal/scheduler/fcfs/utils.go
@@ -0,0 +1,25 @@
+package fcfs
+
+import (
+ "strings"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+)
+
+func FindServerFromPath(servers []config.FileSystemServers, filepath string) (isRemote bool, host string, port int, found bool) {
+ /* search through all the servers */
+ for _, server := range servers {
+ /* check if filepath starts with the server's configured path */
+ if strings.HasPrefix(filepath, server.Path) {
+ /* check if it's remote */
+ if server.Remote != nil {
+ return true, server.Remote.Host, server.Remote.Port, true
+ }
+ /* local filesystem */
+ return false, "", 0, true
+ }
+ }
+
+ /* filesystem not found */
+ return false, "", 0, false
+}
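+
+/*
+   Resolution example (illustrative only; paths and host are assumptions): given
+   a configured server with Path "/srv/projects" and a Remote block with Host
+   "10.0.0.5" and Port 9000, FindServerFromPath(servers, "/srv/projects/data/file.txt")
+   returns (true, "10.0.0.5", 9000, true); for a server with Path "/srv/local" and
+   no Remote block it returns (false, "", 0, true); if no configured path matches,
+   it returns (false, "", 0, false).
+*/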
diff --git a/internal/scheduler/handler.go b/internal/scheduler/handler.go
new file mode 100644
index 0000000..57c6c64
--- /dev/null
+++ b/internal/scheduler/handler.go
@@ -0,0 +1,5 @@
+package scheduler
+
+/* contains handlers related to monitoring the scheduler */
+
+/* TODO: Implementing a watchdog */
diff --git a/internal/scheduler/interface.go b/internal/scheduler/interface.go
new file mode 100644
index 0000000..79288ac
--- /dev/null
+++ b/internal/scheduler/interface.go
@@ -0,0 +1,8 @@
+package scheduler
+
+import "context"
+
+/* scheduler interface */
+type Scheduler interface {
+ Run(ctx context.Context) error
+}
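+
+/*
+   Any type with a Run(ctx context.Context) error method satisfies this
+   interface; the FCFS scheduler added under internal/scheduler/fcfs in this
+   change is one such implementation, and it is passed to InitScheduler in
+   scheduler.go.
+*/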
diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go
new file mode 100644
index 0000000..17f321f
--- /dev/null
+++ b/internal/scheduler/scheduler.go
@@ -0,0 +1,36 @@
+package scheduler
+
+/*
+ laclm uses the FCFS scheduling algorithm
+ the scheduler module is highly modular and can be attached, detached, or replaced on demand
+*/
+
+import (
+ "context"
+ "sync"
+
+ "go.uber.org/zap"
+)
+
+/*
+initializes a Scheduler as a goroutine with context
+when ctx.Done() is received, the scheduler starts shutting down
+the main function waits until wg.Done() is called, ensuring a complete shutdown of the scheduler
+in case of any error, errCh is used to propagate it back to the main function where it is handled
+*/
+func InitScheduler(ctx context.Context, sched Scheduler, wg *sync.WaitGroup, errCh chan<- error) {
+ wg.Add(1)
+ go func(ctx context.Context) {
+ defer wg.Done()
+ zap.L().Info("Scheduler Initialization Started")
+
+ /* the context is used here for gracefully stopping the scheduler */
+ if err := sched.Run(ctx); err != nil {
+ zap.L().Error("Scheduler running error",
+ zap.Error(err),
+ )
+ } else {
+ zap.L().Info("Scheduler Stopped Gracefully")
+ }
+ }(ctx)
+}
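+
+/*
+   Wiring sketch (illustrative only; sessionManager and processor are assumed
+   to already exist in the caller and are not part of this change):
+
+       var wg sync.WaitGroup
+       errCh := make(chan error, 1)
+       ctx, cancel := context.WithCancel(context.Background())
+
+       sched := fcfs.NewFCFSScheduler(sessionManager, processor)
+       scheduler.InitScheduler(ctx, sched, &wg, errCh)
+
+       // on shutdown:
+       cancel()
+       wg.Wait()
+*/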
diff --git a/internal/search/extract.go b/internal/search/extract.go
new file mode 100644
index 0000000..d73dc23
--- /dev/null
+++ b/internal/search/extract.go
@@ -0,0 +1,92 @@
+package search
+
+import (
+ "crypto/tls"
+ "fmt"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/go-ldap/ldap/v3"
+)
+
+/*
+ TODO: Blacklisting
+ This needs to be done when the admin panel is created.
+ Admins will be able to add users to a blacklist so those users are not shown in search results.
+*/
+
+/* searches the pool of all users in the LDAP server for the query and returns matches */
+func GetAllUsersFromLDAP(query string) ([]User, error) {
+
+ var l *ldap.Conn
+ var err error
+ ldapAddress := config.BackendConfig.Authentication.LDAPConfig.Address
+
+ /* check if TLS is enabled */
+ if config.BackendConfig.Authentication.LDAPConfig.TLS {
+ l, err = ldap.DialURL(ldapAddress, ldap.DialWithTLSConfig(&tls.Config{
+
+ /* true if using self-signed certs (not recommended) */
+ InsecureSkipVerify: true,
+ }))
+ } else {
+ l, err = ldap.DialURL(ldapAddress)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ defer l.Close()
+
+ /* authenticate with the LDAP server as the admin user */
+ err = l.Bind(config.BackendConfig.Authentication.LDAPConfig.AdminDN,
+ config.BackendConfig.Authentication.LDAPConfig.AdminPassword,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ /* fall back to a wildcard when the query is empty */
+ if query == "" {
+ query = "*"
+ }
+
+ /* filter for query */
+ // filter := fmt.Sprintf("(|(cn=%s*)(uid=%s*)(mail=%s*))", query, query, query)
+ filter := fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=%s*)(cn=%s*)(mail=%s*)))", query, query, query)
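+ /* e.g. for query "ali" this expands to (&(objectClass=inetOrgPerson)(|(uid=ali*)(cn=ali*)(mail=ali*))) */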
+
+ /* search for users */
+ searchRequest := ldap.NewSearchRequest(
+ /* Base DN */
+ config.BackendConfig.Authentication.LDAPConfig.SearchBase,
+ ldap.ScopeWholeSubtree,
+ ldap.NeverDerefAliases,
+ /* size limit */
+ 0,
+ /* time limit */
+ 0,
+ /* types only */
+ false,
+ /* filter */
+ filter,
+ /* attributes to retrieve */
+ []string{"cn", "mail", "sAMAccountName"}, //
+ nil,
+ )
+
+ /* execute the search request against the LDAP server */
+ sr, err := l.Search(searchRequest)
+ if err != nil {
+ return nil, err
+ }
+
+ users := []User{}
+ for _, entry := range sr.Entries {
+ users = append(users, User{
+ CN: entry.GetAttributeValue("cn"),
+ Mail: entry.GetAttributeValue("mail"),
+ Username: entry.GetAttributeValue("sAMAccountName"),
+ })
+ }
+
+ return users, nil
+}
diff --git a/internal/search/handlers.go b/internal/search/handlers.go
new file mode 100644
index 0000000..f751f8e
--- /dev/null
+++ b/internal/search/handlers.go
@@ -0,0 +1,28 @@
+package search
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "go.uber.org/zap"
+)
+
+/* handler to return list of users that match the query in LDAP server */
+func SearchUsersHandler(w http.ResponseWriter, r *http.Request) {
+ /* fetch all users from LDAP server */
+ query := r.URL.Query().Get("q")
+ users, err := GetAllUsersFromLDAP(query)
+ if err != nil {
+ zap.L().Error("LDAP error",
+ zap.Error(err),
+ )
+ http.Error(w, "LDAP error", http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(users); err != nil {
+ http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+ return
+ }
+}
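+
+/*
+   Usage sketch (illustrative only; the route path is an assumption, not defined
+   in this change). A router set up elsewhere might register this handler like:
+
+       mux := http.NewServeMux()
+       mux.HandleFunc("/search/users", search.SearchUsersHandler)
+
+   A request such as GET /search/users?q=ali then returns a JSON array of
+   matching users with the fields "cn", "mail", and "username".
+*/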
diff --git a/internal/search/model.go b/internal/search/model.go
new file mode 100644
index 0000000..e14617a
--- /dev/null
+++ b/internal/search/model.go
@@ -0,0 +1,8 @@
+package search
+
+/* struct for returning common name, mail, and username */
+type User struct {
+ CN string `json:"cn"`
+ Mail string `json:"mail"`
+ Username string `json:"username"`
+}
diff --git a/internal/services/services.go b/internal/services/services.go
deleted file mode 100644
index e69de29..0000000
diff --git a/internal/session/dbconv.go b/internal/session/dbconv.go
new file mode 100644
index 0000000..813b0c2
--- /dev/null
+++ b/internal/session/dbconv.go
@@ -0,0 +1,149 @@
+package session
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/uuid"
+ "github.com/jackc/pgx/v5/pgtype"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/postgresql"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* converts project session struct into PostgreSQL supported format */
+func ConvertSessionToStoreParams(session *Session) (*postgresql.StoreSessionPQParams, error) {
+ /* validate status */
+ status := string(session.Status)
+ if status != string(StatusActive) && status != string(StatusExpired) && status != string(StatusPending) {
+ return nil, fmt.Errorf("invalid session status: %q, must be one of: %q, %q, %q",
+ status, StatusActive, StatusExpired, StatusPending)
+ }
+
+ createdAt := pgtype.Timestamp{}
+ if err := createdAt.Scan(session.CreatedAt); err != nil {
+ return nil, err
+ }
+
+ lastActiveAt := pgtype.Timestamp{}
+ if err := lastActiveAt.Scan(session.LastActiveAt); err != nil {
+ return nil, err
+ }
+
+ expiry := pgtype.Timestamp{}
+ if err := expiry.Scan(session.Expiry); err != nil {
+ return nil, err
+ }
+
+ completedCount := pgtype.Int4{Int32: int32(session.CompletedCount), Valid: true}
+ failedCount := pgtype.Int4{Int32: int32(session.FailedCount), Valid: true}
+
+ return &postgresql.StoreSessionPQParams{
+ ID: uuid.MustParse(session.ID.String()),
+ Username: session.Username,
+ Ip: pgtype.Text{String: session.IP, Valid: true},
+ UserAgent: pgtype.Text{String: session.UserAgent, Valid: true},
+ Status: string(session.Status),
+ CreatedAt: createdAt,
+ LastActiveAt: lastActiveAt,
+ Expiry: expiry,
+ CompletedCount: completedCount,
+ FailedCount: failedCount,
+ }, nil
+}
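+
+/*
+   Note on the pgtype wrappers used above: each wrapper carries a Valid flag, so
+   pgtype.Text{String: session.IP, Valid: true} is written as a non-NULL column
+   value, while a wrapper left at its zero value (Valid: false) is stored as SQL NULL.
+*/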
+
+/* converts a project transaction struct into the PostgreSQL pending-archive format */
+func ConvertTransactionPendingtoStoreParams(tx types.Transaction) (postgresql.CreatePendingTransactionPQParams, error) {
+ /* marshal ACL entries to JSON bytes */
+ entriesJSON, err := json.Marshal(tx.Entries)
+ if err != nil {
+ return postgresql.CreatePendingTransactionPQParams{}, fmt.Errorf("failed to marshal ACL entries: %w", err)
+ }
+
+ /* convert timestamp to pgtype.Timestamptz */
+ var timestamp pgtype.Timestamptz
+ if err := timestamp.Scan(tx.Timestamp); err != nil {
+ return postgresql.CreatePendingTransactionPQParams{}, fmt.Errorf("failed to convert timestamp: %w", err)
+ }
+
+ /* handle optional error message */
+ var errorMsg pgtype.Text
+ if tx.ErrorMsg != "" {
+ errorMsg = pgtype.Text{String: tx.ErrorMsg, Valid: true}
+ }
+
+ /* handle optional output */
+ var output pgtype.Text
+ if tx.Output != "" {
+ output = pgtype.Text{String: tx.Output, Valid: true}
+ }
+
+ /* Handle optional duration */
+ var durationMs pgtype.Int8
+ if tx.DurationMs > 0 {
+ durationMs = pgtype.Int8{Int64: tx.DurationMs, Valid: true}
+ }
+
+ return postgresql.CreatePendingTransactionPQParams{
+ ID: tx.ID,
+ SessionID: tx.SessionID,
+ Timestamp: timestamp,
+ Operation: string(tx.Operation),
+ TargetPath: tx.TargetPath,
+ Entries: entriesJSON,
+ Status: string(tx.Status),
+ Execstatus: tx.ExecStatus,
+ ErrorMsg: errorMsg,
+ Output: output,
+ ExecutedBy: tx.ExecutedBy,
+ DurationMs: durationMs,
+ }, nil
+}
+
+/* converts a project transaction struct into the PostgreSQL results-archive format */
+func ConvertTransactionResulttoStoreParams(tx types.Transaction) (postgresql.CreateResultsTransactionPQParams, error) {
+ /* marshal ACL entries to JSON bytes */
+ entriesJSON, err := json.Marshal(tx.Entries)
+ if err != nil {
+ return postgresql.CreateResultsTransactionPQParams{}, fmt.Errorf("failed to marshal ACL entries: %w", err)
+ }
+
+ /* convert timestamp to pgtype.Timestamptz */
+ var timestamp pgtype.Timestamptz
+ if err := timestamp.Scan(tx.Timestamp); err != nil {
+ return postgresql.CreateResultsTransactionPQParams{}, fmt.Errorf("failed to convert timestamp: %w", err)
+ }
+
+ /* handle optional error message */
+ var errorMsg pgtype.Text
+ if tx.ErrorMsg != "" {
+ errorMsg = pgtype.Text{String: tx.ErrorMsg, Valid: true}
+ }
+
+ /* handle optional output */
+ var output pgtype.Text
+ if tx.Output != "" {
+ output = pgtype.Text{String: tx.Output, Valid: true}
+ }
+
+ /* Handle optional duration */
+ var durationMs pgtype.Int8
+ if tx.DurationMs > 0 {
+ durationMs = pgtype.Int8{Int64: tx.DurationMs, Valid: true}
+ }
+
+ return postgresql.CreateResultsTransactionPQParams{
+ ID: tx.ID,
+ SessionID: tx.SessionID,
+ Timestamp: timestamp,
+ Operation: string(tx.Operation),
+ TargetPath: tx.TargetPath,
+ Entries: entriesJSON,
+ Status: string(tx.Status),
+ Execstatus: tx.ExecStatus,
+ ErrorMsg: errorMsg,
+ Output: output,
+ ExecutedBy: tx.ExecutedBy,
+ DurationMs: durationMs,
+ }, nil
+}
diff --git a/internal/session/handler.go b/internal/session/handler.go
new file mode 100644
index 0000000..4a8fdc5
--- /dev/null
+++ b/internal/session/handler.go
@@ -0,0 +1,486 @@
+package session
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/google/uuid"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/api/middleware"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/postgresql"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/*
+ TODO: watchdog for session
+ Live sessions and transactions can be monitored through Redis and PostgreSQL
+ the watchdog here shows the processing happening, which needs to be done in the
+ later stages of development
+*/
+
+/* frontend-safe handler for issuing a transaction */
+func (m *Manager) IssueTransaction(w http.ResponseWriter, r *http.Request) {
+ /* extract username from JWT Token */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* acquire manager lock to access sessions map */
+ m.mutex.RLock()
+ session := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ if session == nil {
+ http.Error(w, "Session not found", http.StatusNotFound)
+ return
+ }
+
+ /* acquire session lock for transaction operations */
+ session.Mutex.Lock()
+ defer session.Mutex.Unlock()
+
+ var req types.ScheduleTransactionRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "Invalid request", http.StatusBadRequest)
+ return
+ }
+
+ tx := types.Transaction{
+ ID: uuid.New(),
+ SessionID: session.ID,
+ Timestamp: time.Now(),
+ Operation: req.Operation,
+ TargetPath: req.TargetPath,
+ Entries: req.Entries,
+ Status: types.StatusPending,
+ ExecutedBy: username,
+ }
+
+ /* add transaction to session - session lock is already held */
+ if err := m.AddTransaction(session, &tx); err != nil {
+ http.Error(w, "Failed to add transaction", http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusCreated)
+ if err := json.NewEncoder(w).Encode(map[string]string{
+ "message": "Transaction scheduled",
+ "txn_id": tx.ID.String(),
+ }); err != nil {
+ http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+ return
+ }
+}
+
+type handlerCtxKey string
+type handlerType string
+
+const (
+ HandlerType handlerType = "type"
+)
+
+const (
+ CtxStreamUserSession handlerCtxKey = "stream_user_session"
+ CtxStreamUserTransactionsResults handlerCtxKey = "stream_user_transactions_results"
+ CtxStreamUserTransactionsPending handlerCtxKey = "stream_user_transactions_pending"
+ CtxStreamAllSessions handlerCtxKey = "stream_all_sessions"
+ CtxStreamAllTransactions handlerCtxKey = "stream_all_transactions"
+)
+
+/*
+get single session data
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserSession(w http.ResponseWriter, r *http.Request) {
+
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session context", http.StatusInternalServerError)
+ return
+ }
+
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* user exists and verified, upgrade the websocket connection */
+ conn, err := m.upgrader.Upgrade(w, r, nil)
+ if err != nil {
+ m.errCh <- fmt.Errorf("websocket upgrade error: %w", err)
+ return
+ }
+ defer conn.Close()
+
+ /*
+ context with cancel for web socket handlers
+ this is the official context for a websocket connection
+ cancelling this means closing components of the websocket handler
+ */
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ /* sending initial session data */
+ if err := m.sendCurrentSession(conn, sessionID); err != nil {
+ m.errCh <- fmt.Errorf("error sending initial session: %w", err)
+ return
+ }
+
+ /* stream changes in session made in Redis */
+ go m.listenForSessionChanges(ctx, conn, sessionID)
+
+ /* specify the handler context */
+ ctxVal := context.WithValue(ctx, HandlerType, CtxStreamUserSession)
+
+ /* handle web socket instructions from client */
+ m.handleWebSocketCommands(conn, username, sessionID, ctxVal, cancel)
+}
+
+/*
+get user transactions results information
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserTransactionsResults(w http.ResponseWriter, r *http.Request) {
+
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session ID context", http.StatusInternalServerError)
+ return
+ }
+
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* user exists and verified, upgrade the websocket connection */
+ conn, err := m.upgrader.Upgrade(w, r, nil)
+ if err != nil {
+ m.errCh <- fmt.Errorf("websocket upgrade error: %w", err)
+ return
+ }
+ defer conn.Close()
+
+ /*
+ context with cancel for web socket handlers
+ this is the official context for a websocket connection
+ cancelling this means closing components of the websocket handler
+ */
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ /* sending initial list of transactions data */
+ if err := m.sendCurrentUserTransactionsResults(conn, sessionID, 100); err != nil {
+ m.errCh <- fmt.Errorf("error sending initial transactions: %w", err)
+ return
+ }
+
+ /* stream changes in transactions made in redis */
+ go m.listenForTransactionsChangesResults(ctx, conn, sessionID)
+
+ /* specify the handler context */
+ ctxVal := context.WithValue(ctx, HandlerType, CtxStreamUserTransactionsResults)
+
+ /* handle web socket instructions from client */
+ m.handleWebSocketCommands(conn, username, sessionID, ctxVal, cancel)
+}
+
+/*
+get user transactions pending information
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserTransactionsPending(w http.ResponseWriter, r *http.Request) {
+
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session ID context", http.StatusInternalServerError)
+ return
+ }
+
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* user exists and verified, upgrade the websocket connection */
+ conn, err := m.upgrader.Upgrade(w, r, nil)
+ if err != nil {
+ m.errCh <- fmt.Errorf("websocket upgrade error: %w", err)
+ return
+ }
+ defer conn.Close()
+
+ /*
+ context with cancel for web socket handlers
+ this is the official context for a websocket connection
+ cancelling this means closing components of the websocket handler
+ */
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ /* sending initial list of transactions data */
+ if err := m.sendCurrentUserTransactionsPending(conn, sessionID, 100); err != nil {
+ m.errCh <- fmt.Errorf("error sending initial transactions: %w", err)
+ return
+ }
+
+ /* stream changes in transactions made in redis */
+ go m.listenForTransactionsChangesPending(ctx, conn, sessionID)
+
+ /* specify the handler context */
+ ctxVal := context.WithValue(ctx, HandlerType, CtxStreamUserTransactionsPending)
+
+ /* handle web socket instructions from client */
+ m.handleWebSocketCommands(conn, username, sessionID, ctxVal, cancel)
+}
+
+/*
+get user archived sessions information
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserArchiveSessions(w http.ResponseWriter, r *http.Request) {
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session ID context", http.StatusInternalServerError)
+ return
+ }
+
+ /* extract session from session manager */
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ /* check if session exists in current session manager (user session in live) */
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* deserialize archival request */
+ var req ArchivalRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid JSON", http.StatusBadRequest)
+ return
+ }
+
+ /* fallback to default values if values are invalid */
+ if req.Limit <= 0 {
+ req.Limit = 10
+ }
+ if req.Offset < 0 {
+ req.Offset = 0
+ }
+
+ /* get archived sessions from PostgreSQL database */
+ sessions, err := m.archivalPQ.GetSessionByUsernamePaginatedPQ(
+ r.Context(),
+ postgresql.GetSessionByUsernamePaginatedPQParams{
+ Username: username,
+ Limit: req.Limit,
+ Offset: req.Offset,
+ },
+ )
+ if err != nil {
+ m.errCh <- fmt.Errorf("error fetching archived sessions from postgresql database: %w", err)
+ http.Error(w, "database error", http.StatusInternalServerError)
+ return
+ }
+
+ /* send response with json */
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(sessions); err != nil {
+ http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+ return
+ }
+}
+
+/*
+get user archived results transactions information
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserArchiveResultsTransactions(w http.ResponseWriter, r *http.Request) {
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session ID context", http.StatusInternalServerError)
+ return
+ }
+
+ /* extract session from session manager */
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ /* check if session exists in current session manager (user session in live) */
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* deserialize archival request */
+ var req ArchivalRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid JSON", http.StatusBadRequest)
+ return
+ }
+
+ /* fallback to default values if values are invalid */
+ if req.Limit <= 0 {
+ req.Limit = 10
+ }
+ if req.Offset < 0 {
+ req.Offset = 0
+ }
+
+ /* get archived transactions results from PostgreSQL database */
+ sessions, err := m.archivalPQ.GetResultsTransactionsByUserPaginatedPQ(
+ r.Context(),
+ postgresql.GetResultsTransactionsByUserPaginatedPQParams{
+ ExecutedBy: username,
+ Limit: req.Limit,
+ Offset: req.Offset,
+ },
+ )
+ if err != nil {
+ m.errCh <- fmt.Errorf("error fetching archived transaction results from postgresql database: %w", err)
+ http.Error(w, "database error", http.StatusInternalServerError)
+ return
+ }
+
+ /* send response with json */
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(sessions); err != nil {
+ http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+ return
+ }
+}
+
+/*
+get user archived pending transactions information
+requires user authentication from middleware
+user/
+*/
+func (m *Manager) StreamUserArchivePendingTransactions(w http.ResponseWriter, r *http.Request) {
+ /* get the username */
+ username, ok := r.Context().Value(middleware.ContextKeyUsername).(string)
+ if !ok {
+ http.Error(w, "Invalid user context", http.StatusInternalServerError)
+ return
+ }
+
+ /* get the session id */
+ sessionID, ok := r.Context().Value(middleware.ContextKeySessionID).(string)
+ if !ok {
+ http.Error(w, "Invalid session ID context", http.StatusInternalServerError)
+ return
+ }
+
+ /* extract session from session manager */
+ m.mutex.RLock()
+ session, exists := m.sessionsMap[username]
+ m.mutex.RUnlock()
+
+ /* check if session exists in current session manager (user session in live) */
+ if !exists || session.ID.String() != sessionID {
+ http.Error(w, "unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ /* deserialize archival request */
+ var req ArchivalRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid JSON", http.StatusBadRequest)
+ return
+ }
+
+ /* fallback to default values if values are invalid */
+ if req.Limit <= 0 {
+ req.Limit = 10
+ }
+ if req.Offset < 0 {
+ req.Offset = 0
+ }
+
+ /* get archived pending transactions from PostgreSQL database */
+ sessions, err := m.archivalPQ.GetPendingTransactionsByUserPaginatedPQ(
+ r.Context(),
+ postgresql.GetPendingTransactionsByUserPaginatedPQParams{
+ ExecutedBy: username,
+ Limit: req.Limit,
+ Offset: req.Offset,
+ },
+ )
+ if err != nil {
+ m.errCh <- fmt.Errorf("error fetching archived pending transaction from postgresql database: %w", err)
+ http.Error(w, "database error", http.StatusInternalServerError)
+ return
+ }
+
+ /* send response with json */
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(sessions); err != nil {
+ http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+ return
+ }
+}
diff --git a/internal/session/interact.go b/internal/session/interact.go
new file mode 100644
index 0000000..ff5be79
--- /dev/null
+++ b/internal/session/interact.go
@@ -0,0 +1,283 @@
+package session
+
+import (
+ "container/list"
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/uuid"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* for creating a session for user - used by HTTP HANDLERS */
+func (m *Manager) CreateSession(username, ipAddress, userAgent string) (uuid.UUID, error) {
+
+ /* lock the ActiveSessions mutex till the function ends */
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ /* check if session exists -> if yes, reset the timer and return the session ID */
+ if session, exists := m.sessionsMap[username]; exists {
+ if err := m.RefreshTimer(username); err != nil {
+ m.errCh <- err
+ return uuid.Nil, fmt.Errorf("sessions exists, but failed to refresh the timer")
+ }
+ return session.ID, nil
+ }
+
+ /* Generate session metadata */
+ sessionID := uuid.New()
+ now := time.Now()
+
+ /* create the session */
+ session := &Session{
+ ID: sessionID,
+ Status: StatusActive,
+ Username: username,
+ IP: ipAddress,
+ UserAgent: userAgent,
+ Expiry: time.Now().Add(time.Duration(config.BackendConfig.AppInfo.SessionTimeout) * time.Hour),
+ CreatedAt: now,
+ LastActiveAt: now,
+ Timer: time.AfterFunc(time.Duration(config.BackendConfig.AppInfo.SessionTimeout)*time.Hour,
+ func() {
+ err := m.ExpireSession(username)
+ if err != nil {
+ m.errCh <- err
+ }
+ },
+ ),
+ CompletedCount: 0,
+ FailedCount: 0,
+ TransactionQueue: list.New(),
+ }
+
+ /* add session to active sessions map and list */
+ element := m.sessionOrder.PushBack(session)
+ session.listElem = element
+
+ /* store session into the manager */
+ m.sessionsMap[username] = session
+
+ /* store session to Redis */
+ if err := m.saveSessionRedis(session); err != nil {
+ m.errCh <- err
+ return uuid.Nil, fmt.Errorf("failed to store session to Redis")
+ }
+
+ return sessionID, nil
+}
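+
+/*
+   Usage sketch (illustrative only): a login handler that has already
+   authenticated the user might create or refresh the session like:
+
+       sessionID, err := manager.CreateSession(username, r.RemoteAddr, r.UserAgent())
+       if err != nil {
+           http.Error(w, "failed to create session", http.StatusInternalServerError)
+           return
+       }
+
+   where manager is a *Manager built with NewManager and r is the *http.Request.
+*/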
+
+/* for expiring a session */
+func (m *Manager) ExpireSession(username string) error {
+ /* thread safety for the manager */
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ /* check if user exists in active sessions */
+ session, ok := m.sessionsMap[username]
+ if !ok {
+ return fmt.Errorf("active user session not found")
+ }
+
+ session.Mutex.Lock()
+ defer session.Mutex.Unlock()
+
+ /*
+ delete the session from Redis
+ check if any transactions are remaining in the queue
+ if yes, label transactions and sessions pending
+ if no, label session expired
+ push session and transactions to archive
+ */
+
+ /* check if transactions are remaining in the session queue */
+ if session.TransactionQueue.Len() != 0 {
+ /* transactions are pending, mark them pending */
+ for node := session.TransactionQueue.Front(); node != nil; node = node.Next() {
+ txResult, ok := node.Value.(*types.Transaction)
+ if !ok {
+ continue
+ }
+
+ /* make sure to set status to pending (shouldn't it be already set?) */
+ txResult.Status = types.StatusPending
+
+ /* convert transactions into PostgreSQL compatible parameters */
+ txnPQ, err := ConvertTransactionPendingtoStoreParams(*txResult)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to convert pending transaction to pending archive format: %w", err)
+ continue
+ }
+
+ /* store transaction in PostgreSQL with retries */
+ var storeErr error
+ for retries := 0; retries < 3; retries++ {
+ if _, err := m.archivalPQ.CreatePendingTransactionPQ(context.Background(), txnPQ); err != nil {
+ storeErr = err
+ time.Sleep(time.Second * time.Duration(retries+1))
+ continue
+ }
+ storeErr = nil
+ break
+ }
+ if storeErr != nil {
+ m.errCh <- fmt.Errorf("failed to archive transaction %s after retries: %w", txResult.ID, storeErr)
+ continue
+ }
+ }
+
+ /* mark session as pending */
+ session.Status = StatusPending
+ } else {
+ /* empty transactions queue; mark the session as expired */
+ session.Status = StatusExpired
+ }
+
+ /* get transaction results from Redis */
+ results, err := m.getTransactionResultsRedis(session, 10000)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to get transaction results from Redis: %w", err)
+ } else {
+ for _, txResult := range results {
+ if txResult.Status == types.StatusSuccess || txResult.Status == types.StatusFailed {
+ pqParams, err := ConvertTransactionResulttoStoreParams(txResult)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to convert transaction result to archive format: %w", err)
+ continue
+ }
+ var storeErr error
+ for retries := 0; retries < 3; retries++ {
+ if _, err := m.archivalPQ.CreateResultsTransactionPQ(context.Background(), pqParams); err != nil {
+ storeErr = err
+ time.Sleep(time.Second * time.Duration(retries+1))
+ continue
+ }
+ storeErr = nil
+ break
+ }
+ if storeErr != nil {
+ m.errCh <- fmt.Errorf("failed to archive transaction result %s after retries: %w", txResult.ID, storeErr)
+ continue
+ }
+ }
+ }
+ }
+
+ /* remove session from sessionOrder Linked List */
+ if session.listElem != nil {
+ m.sessionOrder.Remove(session.listElem)
+ }
+
+ /* convert all session parameters to PostgreSQL compatible parameters */
+ archive, err := ConvertSessionToStoreParams(session)
+ if err == nil {
+ /* store session to the archive with retries */
+ var storeErr error
+ for retries := 0; retries < 3; retries++ {
+ if _, err := m.archivalPQ.StoreSessionPQ(context.Background(), *archive); err != nil {
+ storeErr = err
+ time.Sleep(time.Second * time.Duration(retries+1))
+ continue
+ }
+ storeErr = nil
+ break
+ }
+ if storeErr != nil {
+ m.errCh <- fmt.Errorf("failed to archive session after retries: %w", storeErr)
+ }
+ } else {
+ /* handle err */
+ m.errCh <- fmt.Errorf("failed to convert session to archive format: %w", err)
+ }
+
+ /* delete both session and transaction results from Redis */
+ sessionKey := fmt.Sprintf("session:%s", session.ID)
+ txResultsKey := fmt.Sprintf("session:%s:txresults", session.ID)
+ result := m.redis.Del(context.Background(), sessionKey, txResultsKey)
+ if result.Err() != nil {
+ m.errCh <- fmt.Errorf("failed to delete session from Redis: %w", result.Err())
+ }
+
+ /* remove session from sessionsMap */
+ delete(m.sessionsMap, username)
+
+ return nil
+}
+
+/* add transaction to a session - assumes caller holds necessary locks */
+func (m *Manager) AddTransaction(session *Session, txn *types.Transaction) error {
+ /* push transaction into the queue from back */
+ session.TransactionQueue.PushBack(txn)
+
+ /* store transaction to Redis as a pending transaction */
+ if err := m.SavePendingTransaction(session, txn); err != nil {
+ return fmt.Errorf("failed to save transaction to Redis: %w", err)
+ }
+
+ return nil
+}
+
+/* refresh the session timer */
+func (m *Manager) RefreshTimer(username string) error {
+ /* get session from sessionMap */
+ session, exists := m.sessionsMap[username]
+ if !exists {
+ return fmt.Errorf("session not found")
+ }
+
+ /* thread safety for the session */
+ session.Mutex.Lock()
+ defer session.Mutex.Unlock()
+
+ /* reset the expiry time and last active time */
+ session.Expiry = time.Now().Add(time.Duration(config.BackendConfig.AppInfo.SessionTimeout) * time.Hour)
+ session.LastActiveAt = time.Now()
+
+ /* stop the session timer */
+ if session.Timer != nil {
+ session.Timer.Stop()
+ }
+
+ /* reset the session timer */
+ session.Timer = time.AfterFunc(time.Duration(config.BackendConfig.AppInfo.SessionTimeout)*time.Hour,
+ func() {
+ if err := m.ExpireSession(username); err != nil {
+ m.errCh <- err
+ }
+ },
+ )
+
+ /* update Redis for session */
+ if err := m.saveSessionRedis(session); err != nil {
+ m.errCh <- err
+ return fmt.Errorf("failed to store session to Redis")
+ }
+
+ return nil
+}
+
+/* check if a session exists for a username */
+func (m *Manager) SessionExistance(username string) (uuid.UUID, bool, error) {
+ /* thread safety for the manager */
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ /* get session from sessionMap */
+ session, exists := m.sessionsMap[username]
+ if exists {
+
+ /* thread safety for the session */
+ session.Mutex.Lock()
+ defer session.Mutex.Unlock()
+
+ if session.Username == username {
+ return session.ID, true, nil
+ }
+ }
+
+ return uuid.Nil, false, nil
+}
diff --git a/internal/session/manager.go b/internal/session/manager.go
new file mode 100644
index 0000000..990d378
--- /dev/null
+++ b/internal/session/manager.go
@@ -0,0 +1,64 @@
+package session
+
+import (
+ "container/list"
+ "net/http"
+ "sync"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/postgresql"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/redis"
+ "github.com/gorilla/websocket"
+)
+
+var customupgrader = websocket.Upgrader{
+ CheckOrigin: func(r *http.Request) bool {
+ return true // Allow all connections; customize as needed
+ },
+}
+
+/*
+session manager
+sessionsMap -> Maps of sessions -> for O(1) access | fast access during deletion
+sessionOrder -> LinkedList of sessions -> for round robin | fair scheduling
+sessionsMap and sessionOrder are always in sync
+both are maintained in parallel to meet runtime performance requirements,
+trading extra space for faster access
+*/
+type Manager struct {
+ sessionsMap map[string]*Session
+ sessionOrder *list.List
+ mutex sync.RWMutex
+ redis redis.RedisClient
+ archivalPQ *postgresql.Queries
+ errCh chan<- error
+ upgrader websocket.Upgrader
+}
+
+/* create a new session manager */
+func NewManager(redis redis.RedisClient, archivalPQ *postgresql.Queries, errCh chan<- error) *Manager {
+ return &Manager{
+ sessionsMap: make(map[string]*Session),
+ sessionOrder: list.New(),
+ redis: redis,
+ archivalPQ: archivalPQ,
+ errCh: errCh,
+ upgrader: customupgrader,
+ }
+}
+
+/* get next session for round robin */
+func (m *Manager) GetNextSession() *Session {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ /* check if sessionOrder is empty */
+ if m.sessionOrder.Len() == 0 {
+ return nil
+ }
+
+ element := m.sessionOrder.Front()
+ session := element.Value.(*Session)
+
+ m.sessionOrder.MoveToBack(element)
+ return session
+}
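+
+/*
+   illustrative usage (sketch only, not part of the build): a scheduler loop
+   can call GetNextSession repeatedly to serve sessions in round-robin order.
+
+       for {
+           s := manager.GetNextSession()
+           if s == nil {
+               break /* no active sessions */
+           }
+           /* pop one transaction from s.TransactionQueue and process it */
+       }
+*/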
diff --git a/internal/session/model.go b/internal/session/model.go
new file mode 100644
index 0000000..585233f
--- /dev/null
+++ b/internal/session/model.go
@@ -0,0 +1,116 @@
+package session
+
+import (
+ "container/list"
+ "sync"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+/* defining Status type for sessions */
+type Status string
+
+/* for status field */
+const (
+ StatusActive Status = "active"
+ StatusExpired Status = "expired"
+ StatusPending Status = "pending"
+)
+
+/*
+session struct for a user
+appropriate fields must always be updated when any request is made
+*/
+type Session struct {
+ /* keep count of completed and failed transactions */
+ CompletedCount int
+ FailedCount int
+
+ /* session status: active / expired / pending */
+ Status Status
+
+ /* unique ID of session [will be associated with the user forever in logs] */
+ ID uuid.UUID
+
+ /* username of the user */
+ Username string
+
+ /*
+ IP and UserAgent for security logs
+ also can be used for blacklisting and whitelistings
+ illegal useragents can be caught as well as unauthorized IP addresses
+ */
+ IP string
+ UserAgent string
+
+ /* for logging user activity */
+ CreatedAt time.Time
+ LastActiveAt time.Time
+ Expiry time.Time
+ Timer *time.Timer
+
+ /* transactions issued by the user */
+ TransactionQueue *list.List
+
+ /*
+ listElem stores its node address in sessionOrder
+ this is done to maintain O(1) runtime performance while deleting session
+ */
+ listElem *list.Element
+
+ /* mutex for thread safety */
+ Mutex sync.Mutex
+}
+
+/* SessionStreamData is a frontend-safe representation of a session that goes through websocket */
+type SessionStreamData struct {
+ ID string `json:"id"`
+ Username string `json:"username"`
+ IP string `json:"ip"`
+ UserAgent string `json:"userAgent"`
+ Status string `json:"status"`
+ CreatedAt time.Time `json:"createdAt"`
+ LastActiveAt time.Time `json:"lastActiveAt"`
+ Expiry time.Time `json:"expiry"`
+ CompletedCount int `json:"completed"`
+ FailedCount int `json:"failed"`
+}
+
+/* websocket stream message */
+type StreamMessage struct {
+ Type string `json:"type"`
+ Data any `json:"data"`
+ Timestamp time.Time `json:"timestamp"`
+}
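+
+/*
+   illustrative wire format of a StreamMessage (example values only):
+
+       {
+           "type": "session_update",
+           "data": { "session_id": "..." },
+           "timestamp": "2025-01-01T00:00:00Z"
+       }
+*/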
+
+/* TransactionStreamData is a frontend-safe representation of a transaction sent via websocket */
+type TransactionStreamData struct {
+ ID string `json:"id"`
+ SessionID string `json:"sessionId"`
+ Timestamp time.Time `json:"timestamp"`
+ Operation string `json:"operation"`
+ TargetPath string `json:"targetPath"`
+ Entries []ACLEntryStream `json:"entries"`
+ Status string `json:"status"`
+ ErrorMsg string `json:"errorMsg,omitempty"`
+ Output string `json:"output"`
+ ExecutedBy string `json:"executedBy"`
+ DurationMs int64 `json:"durationMs"`
+}
+
+/* ACLEntryStream is a frontend-safe version of an individual ACL entry */
+type ACLEntryStream struct {
+ EntityType string `json:"entityType"`
+ Entity string `json:"entity"`
+ Permissions string `json:"permissions"`
+ Action string `json:"action"`
+ Success bool `json:"success"`
+ Error string `json:"error,omitempty"`
+}
+
+/* archival data fetch requests */
+type ArchivalRequest struct {
+ Limit int32 `json:"limit"`
+ Offset int32 `json:"offset"`
+}
diff --git a/internal/session/sessredis.go b/internal/session/sessredis.go
new file mode 100644
index 0000000..81dd16a
--- /dev/null
+++ b/internal/session/sessredis.go
@@ -0,0 +1,48 @@
+package session
+
+import (
+ "context"
+ "fmt"
+)
+
+/* TODO: make the operations below thread-safe with mutexes */
+
+/* store session into Redis database */
+func (m *Manager) saveSessionRedis(session *Session) error {
+ ctx := context.Background()
+
+ /* session key for redis */
+ key := fmt.Sprintf("session:%s", session.ID)
+
+ /* serialize the session with relevant information */
+ sessionSerialized := session.serializeSessionForRedis()
+
+ /* hset the session to redis */
+ if err := m.redis.HSet(ctx, key, sessionSerialized).Err(); err != nil {
+ return fmt.Errorf("failed to save session to Redis: %w", err)
+ }
+
+ return nil
+}
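+
+/*
+   illustrative key layout (sketch): each session is stored as a Redis hash
+   keyed by "session:<uuid>", so it can be inspected with e.g.
+
+       HGETALL session:<uuid>   -> id, username, ip, user_agent, status, completed, failed, ...
+*/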
+
+/* increment the failed field of the session in Redis */
+func (m *Manager) IncrementSessionFailedRedis(session *Session) error {
+ ctx := context.Background()
+ key := fmt.Sprintf("session:%s", session.ID)
+
+ if err := m.redis.HIncrBy(ctx, key, "failed", 1).Err(); err != nil {
+ return fmt.Errorf("failed to increment failed count in Redis: %w", err)
+ }
+ return nil
+}
+
+/* increment the completed field of the session in Redis */
+func (m *Manager) IncrementSessionCompletedRedis(session *Session) error {
+ ctx := context.Background()
+ key := fmt.Sprintf("session:%s", session.ID)
+
+ if err := m.redis.HIncrBy(ctx, key, "completed", 1).Err(); err != nil {
+ return fmt.Errorf("failed to increment completed count in Redis: %w", err)
+ }
+ return nil
+}
diff --git a/internal/session/socket.go b/internal/session/socket.go
new file mode 100644
index 0000000..58eb0dd
--- /dev/null
+++ b/internal/session/socket.go
@@ -0,0 +1,67 @@
+package session
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/gorilla/websocket"
+)
+
+/* handle websocket commands from clients */
+func (m *Manager) handleWebSocketCommands(conn *websocket.Conn, username, sessionID string, ctxVal context.Context, cancel context.CancelFunc) {
+ defer cancel()
+
+ /* infinite loop */
+ for {
+ var msg map[string]any
+ err := conn.ReadJSON(&msg)
+ if err != nil {
+ if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
+ m.errCh <- fmt.Errorf("websocket error: %w", err)
+ }
+ break
+ }
+
+ /* handle commands from clients */
+ if msgType, ok := msg["type"].(string); ok {
+ switch msgType {
+
+ /* ping echo test */
+ case "ping":
+ pongMsg := StreamMessage{
+ Type: "pong",
+ Data: "pong",
+ Timestamp: time.Now(),
+ }
+ if err := conn.WriteJSON(pongMsg); err != nil {
+ m.errCh <- fmt.Errorf("failed to send pong: %w", err)
+ return
+ }
+
+ /* refresh content served */
+ case "refresh":
+ /* client requests fresh data - implement based on current context */
+ val := ctxVal.Value(HandlerType)
+
+ switch val {
+ case CtxStreamUserSession:
+ /* push user session */
+ if err := m.sendCurrentSession(conn, sessionID); err != nil {
+ m.errCh <- fmt.Errorf("failed to send current session on command: %w", err)
+ }
+ case CtxStreamUserTransactionsResults:
+ /* push user transaction results */
+ if err := m.sendCurrentUserTransactionsResults(conn, sessionID, 100); err != nil {
+ m.errCh <- fmt.Errorf("failed to send current list of transaction results on command: %w", err)
+ }
+ case CtxStreamUserTransactionsPending:
+ /* push user pending transactions */
+ if err := m.sendCurrentUserTransactionsPending(conn, sessionID, 100); err != nil {
+ m.errCh <- fmt.Errorf("failed to send current list of pending transactions on command: %w", err)
+ }
+ }
+ }
+ }
+ }
+}
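+
+/*
+   illustrative client messages handled above (example payloads):
+
+       { "type": "ping" }      -> answered with a "pong" StreamMessage
+       { "type": "refresh" }   -> re-sends the data for the current stream context
+*/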
diff --git a/internal/session/stream_user.go b/internal/session/stream_user.go
new file mode 100644
index 0000000..1e741e9
--- /dev/null
+++ b/internal/session/stream_user.go
@@ -0,0 +1,334 @@
+package session
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+ "github.com/gorilla/websocket"
+ "github.com/redis/go-redis/v9"
+)
+
+/* ==== User Session ==== */
+
+/* send current session of a user */
+func (m *Manager) sendCurrentSession(conn *websocket.Conn, sessionID string) error {
+ ctx := context.Background()
+
+ /* get data for current session from Redis */
+ key := fmt.Sprintf("session:%s", sessionID)
+ sessionData, err := m.redis.HGetAll(ctx, key).Result()
+ if err != nil {
+ /* user session doesn't exist */
+ if err == redis.Nil {
+ message := StreamMessage{
+ Type: "session_state",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "exists": false,
+ },
+ Timestamp: time.Now(),
+ }
+ return conn.WriteJSON(message)
+ }
+ /* error cannot be handled; return an error */
+ return fmt.Errorf("failed to get session data: %w", err)
+ }
+
+ /* session exists; convert Redis hash into session data */
+ session := convertRedisHashToSession(sessionData)
+ message := StreamMessage{
+ Type: "session_state",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "exists": true,
+ "session": session,
+ },
+ Timestamp: time.Now(),
+ }
+
+ return conn.WriteJSON(message)
+}
+
+/* send data regarding current session */
+func (m *Manager) listenForSessionChanges(ctx context.Context, conn *websocket.Conn, sessionID string) {
+ /* subscribe to both keyspace and keyevent notifications */
+ keyspacePattern := fmt.Sprintf("__keyspace@0__:session:%s", sessionID)
+ keyeventPattern := fmt.Sprintf("__keyevent@0__:hset:session:%s", sessionID)
+
+ /* subscribe to Redis keyspace and keyevent */
+ pubsub, err := m.redis.PSubscribe(ctx, keyspacePattern, keyeventPattern)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to subscribe to redis events: %w", err)
+ return
+ }
+
+ defer pubsub.Close()
+
+ /* Redis update channel */
+ ch := pubsub.Channel()
+
+ /* handling session changes */
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case msg := <-ch:
+ /* changes in session stored in Redis detected; handle the event */
+ if err := m.handleSessionChangeEvent(conn, sessionID, msg); err != nil {
+ m.errCh <- fmt.Errorf("error handling session change: %w", err)
+ }
+ }
+ }
+}
+
+/* handle session change event */
+func (m *Manager) handleSessionChangeEvent(conn *websocket.Conn, sessionID string, msg *redis.Message) error {
+ ctx := context.Background()
+
+ /* get session data from Redis */
+ key := fmt.Sprintf("session:%s", sessionID)
+ sessionData, err := m.redis.HGetAll(ctx, key).Result()
+ if err != nil {
+ return fmt.Errorf("failed to get updated session data: %w", err)
+ }
+
+ /* convert session data from Redis hash to session */
+ session := convertRedisHashToSession(sessionData)
+
+ /* prepare the message payload */
+ message := StreamMessage{
+ Type: "session_update",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "session": session,
+ "event_type": msg.Payload,
+ "event_source": "redis_keyspace",
+ },
+ Timestamp: time.Now(),
+ }
+
+ /* send the message to the client */
+ return conn.WriteJSON(message)
+}
+
+/* ==== User Transaction List ==== */
+
+/* send current user transaction results */
+func (m *Manager) sendCurrentUserTransactionsResults(conn *websocket.Conn, sessionID string, limit int) error {
+ ctx := context.Background()
+
+ /* get latest transactions from Redis */
+ key := fmt.Sprintf("session:%s:txresults", sessionID)
+ values, err := m.redis.LRange(ctx, key, int64(-limit), -1).Result()
+ if err != nil {
+ return fmt.Errorf("failed to get transaction results: %w", err)
+ }
+
+ /* convert each JSON string back into a Transaction */
+ transactions := make([]types.Transaction, 0, len(values))
+ for _, val := range values {
+ var tx types.Transaction
+ if err := json.Unmarshal([]byte(val), &tx); err != nil {
+ /* skip malformed results */
+ continue
+ }
+ transactions = append(transactions, tx)
+ }
+
+ /* prepare the message payload */
+ message := StreamMessage{
+ Type: "transaction_update",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "transactions": transactions,
+ },
+ Timestamp: time.Now(),
+ }
+
+ /* send the message to the client */
+ return conn.WriteJSON(message)
+}
+
+/* send current user pending transactions */
+func (m *Manager) sendCurrentUserTransactionsPending(conn *websocket.Conn, sessionID string, limit int) error {
+ ctx := context.Background()
+
+ /* get latest transactions from Redis */
+ key := fmt.Sprintf("session:%s:txpending", sessionID)
+ values, err := m.redis.LRange(ctx, key, int64(-limit), -1).Result()
+ if err != nil {
+ return fmt.Errorf("failed to get transaction results: %w", err)
+ }
+
+ /* convert each JSON string back into a Transaction */
+ transactions := make([]types.Transaction, 0, len(values))
+ for _, val := range values {
+ var tx types.Transaction
+ if err := json.Unmarshal([]byte(val), &tx); err != nil {
+ /* skip malformed results */
+ continue
+ }
+ transactions = append(transactions, tx)
+ }
+
+ /* prepare the message payload */
+ message := StreamMessage{
+ Type: "transaction_update",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "transactions": transactions,
+ },
+ Timestamp: time.Now(),
+ }
+
+ /* send the message to the client */
+ return conn.WriteJSON(message)
+}
+
+/* listen for transaction result changes in Redis */
+func (m *Manager) listenForTransactionsChangesResults(ctx context.Context, conn *websocket.Conn, sessionID string) {
+ /* subscribe to both keyspace and keyevent notifications */
+ keyspacePattern := fmt.Sprintf("__keyspace@0__:session:%s:txresults", sessionID)
+ keyeventPattern := fmt.Sprintf("__keyevent@0__:rpush:session:%s:txresults", sessionID)
+
+ /* subscribe to Redis keyspace and keyevent */
+ pubsub, err := m.redis.PSubscribe(ctx, keyspacePattern, keyeventPattern)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to subscribe to redis events: %w", err)
+ return
+ }
+
+ defer pubsub.Close()
+
+ /* Redis update channel */
+ ch := pubsub.Channel()
+
+ /* handling transaction changes */
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case msg := <-ch:
+ /* changes in transactions stored in Redis detected; handle the event */
+ if err := m.handleTransactionChangeEventResults(conn, sessionID, msg); err != nil {
+ m.errCh <- fmt.Errorf("error handling transaction change: %w", err)
+ }
+ }
+ }
+}
+
+/* listen for pending transaction changes in Redis */
+func (m *Manager) listenForTransactionsChangesPending(ctx context.Context, conn *websocket.Conn, sessionID string) {
+ /* subscribe to both keyspace and keyevent notifications */
+ keyspacePattern := fmt.Sprintf("__keyspace@0__:session:%s:txpending", sessionID)
+ keyeventPattern := fmt.Sprintf("__keyevent@0__:rpush:session:%s:txpending", sessionID)
+
+ /* subscribe to Redis keyspace and keyevent */
+ pubsub, err := m.redis.PSubscribe(ctx, keyspacePattern, keyeventPattern)
+ if err != nil {
+ m.errCh <- fmt.Errorf("failed to subscribe to redis events: %w", err)
+ return
+ }
+
+ defer pubsub.Close()
+
+ /* Redis update channel */
+ ch := pubsub.Channel()
+
+ /* handling transaction changes */
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case msg := <-ch:
+ /* changes in transactions stored in Redis detected; handle the event */
+ if err := m.handleTransactionChangeEventPending(conn, sessionID, msg); err != nil {
+ m.errCh <- fmt.Errorf("error handling transaction change: %w", err)
+ }
+ }
+ }
+}
+
+/*
+ currently, handleTransactionChangeEventResults/Pending send the complete JSON payload whenever anything is updated.
+ The whole frontend will be updated even if one transaction changes its state (for example, going from active to expired).
+*/
+
+/* handle transaction results change event */
+func (m *Manager) handleTransactionChangeEventResults(conn *websocket.Conn, sessionID string, msg *redis.Message) error {
+ ctx := context.Background()
+
+ /* get latest transactions */
+ key := fmt.Sprintf("session:%s:txresults", sessionID)
+ values, err := m.redis.LRange(ctx, key, -100, -1).Result()
+ if err != nil {
+ return fmt.Errorf("failed to get transaction results: %w", err)
+ }
+
+ /* convert each JSON string back into a Transaction */
+ transactions := make([]types.Transaction, 0, len(values))
+ for _, val := range values {
+ var tx types.Transaction
+ if err := json.Unmarshal([]byte(val), &tx); err != nil {
+ /* skip malformed results */
+ continue
+ }
+ transactions = append(transactions, tx)
+ }
+
+ /* prepare the message payload */
+ message := StreamMessage{
+ Type: "transaction_update",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "transactions": transactions,
+ "event_type": msg.Payload,
+ "event_source": "redis_keyspace",
+ },
+ Timestamp: time.Now(),
+ }
+
+ /* send the message to the client */
+ return conn.WriteJSON(message)
+}
+
+/* handle transaction pending change event */
+func (m *Manager) handleTransactionChangeEventPending(conn *websocket.Conn, sessionID string, msg *redis.Message) error {
+ ctx := context.Background()
+
+ /* get latest transactions */
+ key := fmt.Sprintf("session:%s:txpending", sessionID)
+ values, err := m.redis.LRange(ctx, key, -100, -1).Result()
+ if err != nil {
+ return fmt.Errorf("failed to get pending transactions: %w", err)
+ }
+
+ /* convert each JSON string back into a Transaction */
+ transactions := make([]types.Transaction, 0, len(values))
+ for _, val := range values {
+ var tx types.Transaction
+ if err := json.Unmarshal([]byte(val), &tx); err != nil {
+ /* skip malformed results */
+ continue
+ }
+ transactions = append(transactions, tx)
+ }
+
+ /* prepare the message payload */
+ message := StreamMessage{
+ Type: "transaction_update",
+ Data: map[string]any{
+ "session_id": sessionID,
+ "transactions": transactions,
+ "event_type": msg.Payload,
+ "event_source": "redis_keyspace",
+ },
+ Timestamp: time.Now(),
+ }
+
+ /* send the message to the client */
+ return conn.WriteJSON(message)
+}
diff --git a/internal/session/txnredis.go b/internal/session/txnredis.go
new file mode 100644
index 0000000..0d2950d
--- /dev/null
+++ b/internal/session/txnredis.go
@@ -0,0 +1,93 @@
+package session
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/google/uuid"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* save a pending transaction in Redis as transactionID -> JSON in session:sessionID:txnpending */
+func (m *Manager) SavePendingTransaction(session *Session, tx *types.Transaction) error {
+ ctx := context.Background()
+
+ /* get the session ID */
+ sessionID := session.ID
+
+ /* create the Redis key for pending transactions */
+ key := fmt.Sprintf("session:%s:txnpending", sessionID)
+
+ /* marshal transaction to JSON */
+ txBytes, err := json.Marshal(tx)
+ if err != nil {
+ return fmt.Errorf("failed to marshal transaction: %w", err)
+ }
+
+ /* use HSET to store transactionID -> JSON */
+ return m.redis.HSet(ctx, key, tx.ID.String(), txBytes).Err()
+}
+
+/* remove a pending transaction by ID from the Redis hash session:<sessionID>:txnpending */
+func (m *Manager) RemovePendingTransaction(session *Session, txnID uuid.UUID) error {
+ ctx := context.Background()
+
+ sessionID := session.ID
+ key := fmt.Sprintf("session:%s:txnpending", sessionID)
+
+ /* remove the transaction ID field from the hash */
+ return m.redis.HDel(ctx, key, txnID.String()).Err()
+}
+
+/* returns latest results of processed transactions */
+func (m *Manager) getTransactionResultsRedis(session *Session, limit int) ([]types.Transaction, error) {
+ ctx := context.Background()
+
+ /* get the session ID */
+ sessionID := session.ID
+
+ /* create a key for Redis operation */
+ key := fmt.Sprintf("session:%s:txresults", sessionID)
+
+ /* returns transactions in chronological order */
+ values, err := m.redis.LRange(ctx, key, int64(-limit), -1).Result()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get transaction results: %w", err)
+ }
+
+ /* converts each JSON string back into a TransactionResult */
+ results := make([]types.Transaction, 0, len(values))
+ for _, val := range values {
+ var result types.Transaction
+ if err := json.Unmarshal([]byte(val), &result); err != nil {
+ m.errCh <- fmt.Errorf("failed to unmarshal transaction result: %w; value: %s", err, val)
+ continue
+ }
+ results = append(results, result)
+ }
+
+ return results, nil
+}
+
+/* save transaction results to redis */
+func (m *Manager) SaveTransactionRedisList(session *Session, txResult *types.Transaction, list string) error {
+
+ ctx := context.Background()
+
+ /* get the session ID */
+ sessionID := session.ID
+
+ /* create a key for Redis operation */
+ key := fmt.Sprintf("session:%s:%s", sessionID, list)
+
+ /* marshal transaction result to JSON */
+ resultBytes, err := json.Marshal(txResult)
+ if err != nil {
+ return fmt.Errorf("failed to marshal transaction result: %w", err)
+ }
+
+ /* push the transaction result in the back of the list */
+ return m.redis.RPush(ctx, key, resultBytes).Err()
+}
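+
+/*
+   illustrative usage (sketch): once a transaction has been executed, a caller
+   holding the session could move it out of the pending hash and append it to
+   the results list:
+
+       _ = m.RemovePendingTransaction(session, txn.ID)
+       _ = m.SaveTransactionRedisList(session, txn, "txresults")
+*/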
diff --git a/internal/session/utils.go b/internal/session/utils.go
new file mode 100644
index 0000000..af5b4e0
--- /dev/null
+++ b/internal/session/utils.go
@@ -0,0 +1,34 @@
+package session
+
+import "time"
+
+/* serialize session information to store in Redis */
+func (s *Session) serializeSessionForRedis() map[string]any {
+ return map[string]any{
+ "id": s.ID,
+ "username": s.Username,
+ "ip": s.IP,
+ "user_agent": s.UserAgent,
+ "status": "active",
+ "created_at": s.CreatedAt.Format(time.RFC3339),
+ "last_active_at": s.LastActiveAt.Format(time.RFC3339),
+ "expiry": s.Expiry.Format(time.RFC3339),
+ "completed": s.CompletedCount,
+ "failed": s.FailedCount,
+ }
+}
+
+/* returns all the usernames in the manager */
+func (m *Manager) GetAllUsernames() []string {
+ /* thread safety of manager */
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ /* create and fill slice for usernames */
+ usernames := make([]string, 0, len(m.sessionsMap))
+ for _, session := range m.sessionsMap {
+ usernames = append(usernames, session.Username)
+ }
+
+ return usernames
+}
diff --git a/internal/session/ws_utils.go b/internal/session/ws_utils.go
new file mode 100644
index 0000000..ea8249e
--- /dev/null
+++ b/internal/session/ws_utils.go
@@ -0,0 +1,109 @@
+package session
+
+import (
+ "strconv"
+ "time"
+)
+
+/* convert redis hash to session */
+func convertRedisHashToSession(hash map[string]string) SessionStreamData {
+ session := SessionStreamData{}
+
+ if val, ok := hash["id"]; ok {
+ session.ID = val
+ }
+
+ if val, ok := hash["username"]; ok {
+ session.Username = val
+ }
+
+ if val, ok := hash["ip"]; ok {
+ session.IP = val
+ }
+
+ if val, ok := hash["user_agent"]; ok {
+ session.UserAgent = val
+ }
+
+ if val, ok := hash["status"]; ok {
+ session.Status = val
+ }
+
+ if val, ok := hash["created_at"]; ok {
+ if t, err := time.Parse(time.RFC3339, val); err == nil {
+ session.CreatedAt = t
+ }
+ }
+
+ if val, ok := hash["last_active_at"]; ok {
+ if t, err := time.Parse(time.RFC3339, val); err == nil {
+ session.LastActiveAt = t
+ }
+ }
+
+ if val, ok := hash["expiry"]; ok {
+ if t, err := time.Parse(time.RFC3339, val); err == nil {
+ session.Expiry = t
+ }
+ }
+
+ if val, ok := hash["completed"]; ok {
+ if i, err := strconv.Atoi(val); err == nil {
+ session.CompletedCount = i
+ }
+ }
+
+ if val, ok := hash["failed"]; ok {
+ if i, err := strconv.Atoi(val); err == nil {
+ session.FailedCount = i
+ }
+ }
+
+ return session
+}
+
+// /* build transaction stream data from a map */
+// func buildTransactionStreamDataFromMap(data map[string]interface{}) (TransactionStreamData, error) {
+// entriesRaw, _ := data["entries"].([]interface{})
+// entries := make([]ACLEntryStream, 0, len(entriesRaw))
+// for _, e := range entriesRaw {
+// if entryMap, ok := e.(map[string]interface{}); ok {
+// entry := ACLEntryStream{
+// EntityType: fmt.Sprintf("%v", entryMap["entityType"]),
+// Entity: fmt.Sprintf("%v", entryMap["entity"]),
+// Permissions: fmt.Sprintf("%v", entryMap["permissions"]),
+// Action: fmt.Sprintf("%v", entryMap["action"]),
+// Success: entryMap["success"] == true,
+// Error: fmt.Sprintf("%v", entryMap["error"]),
+// }
+// entries = append(entries, entry)
+// }
+// }
+
+// var timestamp time.Time
+// if t, err := time.Parse(time.RFC3339, data["timestamp"].(string)); err == nil {
+// timestamp = t
+// }
+
+// return TransactionStreamData{
+// ID: fmt.Sprintf("%v", data["id"]),
+// SessionID: fmt.Sprintf("%v", data["sessionId"]),
+// Timestamp: timestamp,
+// Operation: fmt.Sprintf("%v", data["operation"]),
+// TargetPath: fmt.Sprintf("%v", data["targetPath"]),
+// Entries: entries,
+// Status: fmt.Sprintf("%v", data["status"]),
+// ErrorMsg: fmt.Sprintf("%v", data["errorMsg"]),
+// Output: fmt.Sprintf("%v", data["output"]),
+// ExecutedBy: fmt.Sprintf("%v", data["executedBy"]),
+// DurationMs: int64(mustFloat64(data["durationMs"])),
+// }, nil
+// }
+
+// /* returns if value is of float type */
+// func mustFloat64(val interface{}) float64 {
+// if f, ok := val.(float64); ok {
+// return f
+// }
+// return 0
+// }
diff --git a/internal/sessionmanager/sessionmanager.go b/internal/sessionmanager/sessionmanager.go
deleted file mode 100644
index e69de29..0000000
diff --git a/internal/token/token.go b/internal/token/token.go
new file mode 100644
index 0000000..b022db0
--- /dev/null
+++ b/internal/token/token.go
@@ -0,0 +1,90 @@
+package token
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/golang-jwt/jwt/v5"
+ "github.com/google/uuid"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+)
+
+/* generating jwt token for user identification with specified configs */
+func GenerateJWT(username string, sessionID uuid.UUID) (string, error) {
+ expiryHours := config.BackendConfig.AppInfo.SessionTimeout
+
+ /* generate JWT token with claims */
+ token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+ "username": username,
+ "sessionID": sessionID,
+ "exp": time.Now().Add(time.Hour * time.Duration(expiryHours)).Unix(),
+ })
+
+ return token.SignedString([]byte(config.BackendConfig.BackendSecurity.JWTTokenSecret))
+}
+
+/* validate JWT token and return claims */
+func ValidateJWT(tokenString string) (jwt.MapClaims, error) {
+ /* parse the token */
+ token, err := jwt.Parse(tokenString, func(token *jwt.Token) (any, error) {
+ if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+ return nil, fmt.Errorf("unexpected signing method")
+ }
+ return []byte(config.BackendConfig.BackendSecurity.JWTTokenSecret), nil
+ })
+
+ /* check if token is valid */
+ if err != nil {
+ return nil, fmt.Errorf("JWT parsing error: %w", err)
+ }
+
+ /* check if token is valid */
+ if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
+ return claims, nil
+ }
+
+ return nil, fmt.Errorf("invalid token")
+}
+
+/* extracts username and sessionID from JWT token */
+func GetDataFromJWT(tokenString string) (string, string, error) {
+ /* get claims from JWT Token */
+ claims, err := ValidateJWT(tokenString)
+ if err != nil {
+ return "", "", fmt.Errorf("JWT validation error: %w", err)
+ }
+
+ /* extract username from JWT Token */
+ username, ok := claims["username"].(string)
+ if !ok {
+ return "", "", fmt.Errorf("username not found in token")
+ }
+
+ /* extract sessionID from JWT Token */
+ sessionID, ok := claims["sessionID"].(string)
+ if !ok {
+ return "", "", fmt.Errorf("sessionID not found in token")
+ }
+ return username, sessionID, nil
+}
+
+/* extract username and sessionID from http request (wrapper around GetUsernameFromJWT for http requests) */
+func ExtractDataFromRequest(r *http.Request) (string, string, error) {
+ /* get the authorization header */
+ authHeader := r.Header.Get("Authorization")
+ if authHeader == "" {
+ return "", "", fmt.Errorf("authorization header not found")
+ }
+
+ /* check if the header is in the correct format */
+ parts := strings.Split(authHeader, " ")
+ if len(parts) != 2 || parts[0] != "Bearer" {
+ return "", "", fmt.Errorf("invalid authorization header format")
+ }
+
+ /* extract username and sessionID from JWT token */
+ return GetDataFromJWT(parts[1])
+}
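+
+/*
+   illustrative request header expected by ExtractDataFromRequest:
+
+       Authorization: Bearer <jwt-token>
+*/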
diff --git a/internal/transprocessor/localtxn.go b/internal/transprocessor/localtxn.go
new file mode 100644
index 0000000..c27b001
--- /dev/null
+++ b/internal/transprocessor/localtxn.go
@@ -0,0 +1,82 @@
+package transprocessor
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* maintains locks on file which are actively under ACL modifications */
+var pathLocks sync.Map
+
+/* locks a given file path */
+func getPathLock(path string) *sync.Mutex {
+ mtx, _ := pathLocks.LoadOrStore(path, &sync.Mutex{})
+ return mtx.(*sync.Mutex)
+}
+
+/* handles local transaction execution (change permissions via mounts) */
+func (p *PermProcessor) HandleLocalTransaction(txn *types.Transaction, absolutePath string) error {
+ aclEntry := BuildACLEntry(txn.Entries)
+
+ /* lock the file path for thread safety (ensure unlock even on panic) */
+ lock := getPathLock(absolutePath)
+ lock.Lock()
+ defer lock.Unlock()
+
+ /* execute the ACL modifications with acl commands */
+ var cmd *exec.Cmd
+ switch txn.Entries.Action {
+ case "add", "modify":
+ cmd = exec.Command("setfacl", "-m", aclEntry, absolutePath)
+ case "remove":
+ cmd = exec.Command("setfacl", "-x", aclEntry, absolutePath)
+ default:
+ /* unsupported action: record the failure and skip execution to avoid a nil command */
+ txn.Status = types.StatusSuccess
+ txn.ExecStatus = false
+ txn.ErrorMsg = fmt.Sprintf("unsupported ACL action: %s", txn.Entries.Action)
+ return nil
+ }
+
+ start := time.Now()
+
+ output, err := cmd.CombinedOutput()
+
+ duration := time.Since(start).Milliseconds()
+
+ txn.Output = string(output)
+ txn.DurationMs = duration
+
+ if err != nil {
+ /* transaction was processed but execution failed */
+ txn.Status = types.StatusSuccess
+ txn.ExecStatus = false
+ txn.ErrorMsg = fmt.Sprintf("setfacl failed: %s, output: %s", err.Error(), output)
+ return nil
+ }
+
+ txn.Status = types.StatusSuccess
+ txn.ExecStatus = true
+
+ return nil
+}
+
+/* builds the ACL entry string for setfacl */
+func BuildACLEntry(entry types.ACLEntry) string {
+ var sb strings.Builder
+
+ if entry.IsDefault {
+ sb.WriteString("default:")
+ }
+
+ sb.WriteString(entry.EntityType)
+ sb.WriteString(":")
+ sb.WriteString(entry.Entity)
+ sb.WriteString(":")
+ sb.WriteString(entry.Permissions)
+
+ return sb.String()
+}
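+
+/*
+   illustrative outputs of BuildACLEntry (example entries):
+
+       {EntityType: "user", Entity: "alice", Permissions: "rwx"}                    -> "user:alice:rwx"
+       {EntityType: "group", Entity: "devs", Permissions: "r-x", IsDefault: true}   -> "default:group:devs:r-x"
+*/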
diff --git a/internal/transprocessor/model.go b/internal/transprocessor/model.go
new file mode 100644
index 0000000..5dd517d
--- /dev/null
+++ b/internal/transprocessor/model.go
@@ -0,0 +1,13 @@
+package transprocessor
+
+import "github.com/PythonHacker24/linux-acl-management-backend/internal/grpcpool"
+
+/*
+ transprocessor implements the transactions structure that whole project complies with
+*/
+
+/* permissions processor */
+type PermProcessor struct {
+ gRPCPool *grpcpool.ClientPool
+ errCh chan<- error
+}
diff --git a/internal/transprocessor/perm-processor.go b/internal/transprocessor/perm-processor.go
new file mode 100644
index 0000000..09cb643
--- /dev/null
+++ b/internal/transprocessor/perm-processor.go
@@ -0,0 +1,82 @@
+package transprocessor
+
+import (
+ "context"
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/grpcpool"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/* instantiate a new permission processor */
+func NewPermProcessor(gRPCPool *grpcpool.ClientPool, errCh chan<- error) *PermProcessor {
+ return &PermProcessor{
+ gRPCPool: gRPCPool,
+ errCh: errCh,
+ }
+}
+
+/* processor for permissions manager */
+func (p *PermProcessor) Process(ctx context.Context, curSession *session.Session, txn *types.Transaction) error {
+
+ /* add complete information here + persistent logging in database */
+ zap.L().Info("Processing Transaction",
+ zap.String("user", curSession.Username),
+ )
+
+ select {
+ case <-ctx.Done():
+ /* close the processor */
+ zap.L().Warn("Transaction process stopped due to shutdown",
+ zap.String("user", curSession.Username),
+ )
+ return ctx.Err()
+ default:
+ /*
+ permprocessor hands over transactions to remoteprocessor/localprocessor depending upon request
+ remoteprocessor -> handles permissions on remote servers
+ localprocessor -> handles permissions on local system (where this backend is deployed)
+ */
+
+ /* this line decides between systems like BeeGFS and NFS due to differences in ACL execution */
+ isRemote, host, port, found, absolutePath := FindServerFromPath(txn.TargetPath)
+
+ zap.L().Info("Found server",
+ zap.String("targetPath", txn.TargetPath),
+ zap.String("isRemote", fmt.Sprintf("%t", isRemote)),
+ zap.String("host", host),
+ zap.Int("port", port),
+ zap.String("found", fmt.Sprintf("%t", found)),
+ zap.String("absolutePath", absolutePath),
+ )
+
+ if !found {
+ /* filepath is invalid, filesystem doesn't exist */
+ txn.ErrorMsg = "filesystem of given path doesn't exist"
+ } else {
+ if isRemote {
+ /* handle through daemons */
+ if err := p.HandleRemoteTransaction(host, port, txn, absolutePath); err != nil {
+ p.errCh <- err
+ return fmt.Errorf("failed to handle remote transaction")
+ }
+ } else {
+ /* handle locally */
+ if err := p.HandleLocalTransaction(txn, absolutePath); err != nil {
+ p.errCh <- err
+ return fmt.Errorf("failed to handler local transaction")
+ }
+ }
+ }
+
+ /* REMOVE THIS */
+ zap.L().Info("Completed Transaction",
+ zap.String("ID", txn.ID.String()),
+ )
+ }
+
+ return nil
+}
diff --git a/internal/transprocessor/processor.go b/internal/transprocessor/processor.go
new file mode 100644
index 0000000..d8e70dd
--- /dev/null
+++ b/internal/transprocessor/processor.go
@@ -0,0 +1,21 @@
+package transprocessor
+
+import (
+ "context"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/session"
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+)
+
+/*
+ the scheduler's job is to decide how transactions are allocated to executors;
+ transprocessor's job is to open the content of a transaction and handle it from there onwards,
+ so the actual work of a transaction is irrelevant to the scheduler - transprocessor is responsible for it
+
+ also, this architecture allows us to create multiple processors in case we plan to extend in the future
+*/
+
+/* transaction processor - pluggable to any scheduler */
+type TransactionProcessor interface {
+ Process(ctx context.Context, curSession *session.Session, transaction *types.Transaction) error
+}
diff --git a/internal/transprocessor/remotetxn.go b/internal/transprocessor/remotetxn.go
new file mode 100644
index 0000000..5660b99
--- /dev/null
+++ b/internal/transprocessor/remotetxn.go
@@ -0,0 +1,74 @@
+package transprocessor
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/types"
+ protos "github.com/PythonHacker24/linux-acl-management-backend/proto"
+)
+
+/* NEED TO ADD DURATION TIME -> EXISTS IN LOCAL */
+
+/* takes a transactions and attempts to execute it via daemons */
+func (p *PermProcessor) HandleRemoteTransaction(host string, port int, txn *types.Transaction, absolutePath string) error {
+
+ /* if gRPCPool is nil, return an error */
+ if p.gRPCPool == nil {
+ return fmt.Errorf("gRPC pool is nil")
+ }
+
+ /* get connection to the respective daemon */
+ address := fmt.Sprintf("%s:%d", host, port)
+ conn, err := p.gRPCPool.GetConn(address, p.errCh)
+ if err != nil {
+ p.errCh <- err
+ return fmt.Errorf("failed to connect with a daemon: %s", address)
+ }
+
+ /* make it a for loop for iterating over all entries */
+ aclpayload := &protos.ACLEntry{
+ EntityType: txn.Entries.EntityType,
+ Entity: txn.Entries.Entity,
+ Permissions: txn.Entries.Permissions,
+ Action: txn.Entries.Action,
+ IsDefault: txn.Entries.IsDefault,
+ }
+
+ /* build the request for daemon */
+ request := &protos.ApplyACLRequest{
+ TransactionID: txn.ID.String(),
+ TargetPath: absolutePath,
+ Entry: aclpayload,
+ }
+
+ /* MAKE IT CONFIGURABLE */
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+
+ aclClient := protos.NewACLServiceClient(conn)
+ aclResponse, err := aclClient.ApplyACLEntry(ctx, request)
+ if err != nil || aclResponse == nil {
+ p.errCh <- fmt.Errorf("failed to send ACL request to daemon")
+ cancel()
+ return err
+ }
+
+ if aclResponse.Success {
+
+ /*
+ this is a bit crude for now, let daemon set this
+ backend should not have control over execution
+ */
+
+ /* set transaction successful*/
+ txn.Output = "ACL executed successfully on filesystem servers"
+
+ txn.ExecStatus = true
+ } else {
+ txn.ErrorMsg = "ACL failed to get executed in the filesystem server"
+ }
+
+ cancel()
+ return nil
+}
diff --git a/internal/transprocessor/utils.go b/internal/transprocessor/utils.go
new file mode 100644
index 0000000..b48e213
--- /dev/null
+++ b/internal/transprocessor/utils.go
@@ -0,0 +1,27 @@
+package transprocessor
+
+import (
+ "path"
+ "strings"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+)
+
+func FindServerFromPath(filepath string) (isRemote bool, host string, port int, found bool, absolutePath string) {
+ /* search through all the servers */
+ for _, server := range config.BackendConfig.FileSystemServers {
+ /* check if the server path has the prefix for filepath */
+ if strings.HasPrefix(filepath, server.Path) {
+ absolutePath := strings.TrimPrefix(filepath, server.Path)
+ /* check if it's remote */
+ if server.Remote != nil {
+ return true, server.Remote.Host, server.Remote.Port, true, absolutePath
+ }
+ /* local filesystem */
+ return false, "", 0, true, path.Join(config.BackendConfig.AppInfo.BasePath, filepath)
+ }
+ }
+
+ /* filesystem not found */
+ return false, "", 0, false, ""
+}
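+
+/*
+   illustrative behaviour (assuming a configured server with Path "/beegfs"
+   and a remote daemon at 10.0.0.5:9000; values are examples only):
+
+       FindServerFromPath("/beegfs/projects/data") -> isRemote=true, host="10.0.0.5", port=9000, found=true, absolutePath="/projects/data"
+       FindServerFromPath("/no/such/mount")        -> found=false
+*/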
diff --git a/internal/traversal/handler.go b/internal/traversal/handler.go
new file mode 100644
index 0000000..480c483
--- /dev/null
+++ b/internal/traversal/handler.go
@@ -0,0 +1,56 @@
+package traversal
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/internal/auth"
+)
+
+/*
+ user considers / to be the root of the file path
+ the backend translates / to basepath/ securely
+ this translation needs to be done wherever necessary
+*/
+
+/* POST handler for listing files in given directory */
+func ListFilesInDirectory(w http.ResponseWriter, r *http.Request) {
+
+ /* extracting userID from request */
+ username, _, err := auth.ExtractDataFromRequest(r)
+ if err != nil {
+ zap.L().Error("Error during getting username in HandleListFiles handler",
+ zap.Error(err),
+ )
+ return
+ }
+
+ /* check if the request body is valid */
+ var listRequest ListRequest
+ err = json.NewDecoder(r.Body).Decode(&listRequest)
+ if err != nil {
+ http.Error(w, "Invalid request body", http.StatusBadRequest)
+ return
+ }
+
+ /* list all the files in given filepath */
+ entries, err := ListFiles(listRequest.FilePath, username)
+ if err != nil {
+ zap.L().Warn("File listing error",
+ zap.Error(err),
+ )
+ http.Error(w, "Failed to list files", http.StatusInternalServerError)
+ }
+
+ /* send the response back */
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(entries); err != nil {
+ zap.L().Error("Failed to encode response for listing request",
+ zap.Error(err),
+ )
+ http.Error(w, "Failed to encode response for listing request", http.StatusInternalServerError)
+ return
+ }
+}
diff --git a/internal/traversal/model.go b/internal/traversal/model.go
new file mode 100644
index 0000000..f769eb4
--- /dev/null
+++ b/internal/traversal/model.go
@@ -0,0 +1,18 @@
+package traversal
+
+/*
+file entry contains basic information about a file
+this information is displayed in the traversal view of the frontend
+*/
+type FileEntry struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ IsDir bool `json:"is_dir"`
+ Size int64 `json:"size"`
+ ModTime int64 `json:"mod_time"`
+}
+
+/* request for listing files in a given directory path */
+type ListRequest struct {
+ FilePath string `json:"file_path"`
+}
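+
+/*
+   illustrative request body for ListFilesInDirectory (example value):
+
+       { "file_path": "/projects/data" }
+*/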
diff --git a/internal/traversal/traversal.go b/internal/traversal/traversal.go
new file mode 100644
index 0000000..182e70c
--- /dev/null
+++ b/internal/traversal/traversal.go
@@ -0,0 +1,153 @@
+package traversal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+)
+
+/* comprehensive list of dangerous characters */
+var (
+ dangerousChars = []string{";", "|", "&", "`", "$", "(", ")", "<", ">", "{", "}", "[", "]", "\\", "'", "\""}
+)
+
+/* list files in a given directory with some basic information */
+func ListFiles(path string, userID string) ([]FileEntry, error) {
+ var entries []FileEntry
+
+ /* combine basePath with the requested path */
+ fullPath := filepath.Join(config.BackendConfig.AppInfo.BasePath, path)
+
+ /* clean the path to prevent directory traversal */
+ fullPath = filepath.Clean(fullPath)
+
+ /* ensure the path is still within the basePath (prevent directory traversal) */
+ if !strings.HasPrefix(fullPath, filepath.Clean(config.BackendConfig.AppInfo.BasePath)) {
+ zap.L().Warn("Path traversal attempt detected",
+ zap.String("path", path),
+ zap.String("full_path", fullPath),
+ )
+ return nil, fmt.Errorf("access denied: path outside allowed directory")
+ }
+
+ /* list all the files in the given directory */
+ files, err := os.ReadDir(fullPath)
+ if err != nil {
+ zap.L().Error("Failed to read directory",
+ zap.String("path", fullPath),
+ zap.Error(err),
+ )
+ return nil, fmt.Errorf("failed to read directory: %w", err)
+ }
+
+ /* retrieve information for each file in the directory */
+ for _, f := range files {
+ entryPath := filepath.Join(path, f.Name())
+ fullEntryPath := filepath.Join(fullPath, f.Name())
+
+ /* verify the entry is still within allowed directory */
+ if !strings.HasPrefix(fullEntryPath, filepath.Clean(config.BackendConfig.AppInfo.BasePath)) {
+ zap.L().Warn("Entry path outside allowed directory",
+ zap.String("entry", f.Name()),
+ zap.String("full_path", fullEntryPath),
+ )
+ continue
+ }
+
+ /* check ACL access using the file path */
+ isOwner, err := isOwner(fullEntryPath, userID)
+ if err != nil {
+ zap.L().Warn("Failed to check ownership, skipping file",
+ zap.String("path", fullEntryPath),
+ zap.String("user", userID),
+ zap.Error(err),
+ )
+ continue
+ }
+
+ if !isOwner {
+ continue
+ }
+
+ /* get file information */
+ info, err := os.Stat(fullEntryPath)
+ if err != nil {
+ zap.L().Warn("Error while getting file information",
+ zap.String("path", fullEntryPath),
+ zap.Error(err),
+ )
+ continue
+ }
+
+ entries = append(entries, FileEntry{
+ Name: f.Name(),
+ Path: entryPath,
+ IsDir: info.IsDir(),
+ Size: info.Size(),
+ ModTime: info.ModTime().Unix(),
+ })
+ }
+
+ return entries, nil
+}
+
+/*
+checks if the user is the owner of the file using getfacl
+*/
+func isOwner(filePath string, userCN string) (bool, error) {
+ cleanPath := filepath.Clean(filePath)
+
+ /* validation to ensure that the path doesn't contain dangerous characters */
+ for _, char := range dangerousChars {
+ if strings.Contains(cleanPath, char) {
+ zap.L().Warn("Illegal character detected in file path",
+ zap.String("path", cleanPath),
+ zap.String("character", char),
+ )
+ return false, fmt.Errorf("invalid character in file path")
+ }
+ }
+
+ /* get the file's ACL using getfacl with the file path directly */
+ cmd := exec.Command("getfacl", cleanPath)
+ output, err := cmd.Output()
+ if err != nil {
+ zap.L().Error("Failed to execute getfacl",
+ zap.String("path", cleanPath),
+ zap.Error(err),
+ )
+ return false, fmt.Errorf("failed to check file permissions: %w", err)
+ }
+
+ /* parse the getfacl output to check ownership */
+ lines := strings.Split(string(output), "\n")
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+
+ if strings.HasPrefix(line, "# owner:") {
+ owner := strings.TrimSpace(strings.TrimPrefix(line, "# owner:"))
+ if strings.EqualFold(owner, userCN) {
+ return true, nil
+ }
+ }
+
+ if strings.HasPrefix(line, "user:") && !strings.HasPrefix(line, "user::") {
+ parts := strings.Split(line, ":")
+ if len(parts) >= 3 {
+ aclUser := parts[1]
+ permissions := parts[2]
+ if strings.EqualFold(aclUser, userCN) && strings.Contains(permissions, "w") {
+ return true, nil
+ }
+ }
+ }
+ }
+
+ return false, nil
+}
diff --git a/internal/types/transaction.go b/internal/types/transaction.go
new file mode 100644
index 0000000..e383950
--- /dev/null
+++ b/internal/types/transaction.go
@@ -0,0 +1,98 @@
+package types
+
+import (
+ "time"
+
+ "github.com/google/uuid"
+)
+
+/*
+ contains shared definitions where complete modularization was not possible
+ e.g. session and transprocessor need the same transaction structure, and keeping separate definitions
+ would mean rewriting the same code multiple times.
+*/
+
+/* request body for scheduling transaction */
+type ScheduleTransactionRequest struct {
+ Operation OperationType `json:"operation"`
+ TargetPath string `json:"targetPath"`
+ Entries ACLEntry `json:"entries"`
+}
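+
+/*
+   illustrative request body for scheduling a transaction (example values):
+
+       {
+           "operation": "setfacl",
+           "targetPath": "/projects/data/report.txt",
+           "entries": { "entityType": "user", "entity": "alice", "permissions": "rw-", "action": "add", "isDefault": false }
+       }
+*/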
+
+/* represents the result of the transaction */
+type TxnStatus string
+
+/* defining transactions status types */
+const (
+ StatusPending TxnStatus = "pending"
+ StatusSuccess TxnStatus = "success"
+ StatusFailed TxnStatus = "failed"
+)
+
+/* represents what kind of ACL operation was performed */
+type OperationType string
+
+/* defining operating types */
+const (
+ OperationGetACL OperationType = "getfacl"
+ OperationSetACL OperationType = "setfacl"
+)
+
+/* represents an individual ACL rule attempted to be changed */
+type ACLEntry struct {
+ /* e.g., "user", "group", "mask", "other" */
+ EntityType string `json:"entityType"`
+
+ /*
+ username, group name, or blank
+ blank means it applies to the current owner/group (e.g., user::, group::, other::, mask::)
+ */
+ Entity string `json:"entity"`
+
+ /* e.g., "rwx", "rw-", etc. */
+ Permissions string `json:"permissions"`
+
+ /* e.g., "add", "modify", "remove" */
+ Action string `json:"action"`
+
+ /* whether this is a default ACL (i.e., applies to new files/subdirs) */
+ IsDefault bool `json:"isDefault"`
+
+ /* only set if failed */
+ Error string `json:"error,omitempty"`
+ Success bool `json:"success"`
+}
+
+/* holds the full state of a permission change operation */
+type Transaction struct {
+ ID uuid.UUID `json:"id"`
+ SessionID uuid.UUID `json:"sessionId"`
+ Timestamp time.Time `json:"timestamp"`
+
+ /* getfacl/setfacl */
+ Operation OperationType `json:"operation"`
+
+ /* File/directory affected */
+ TargetPath string `json:"targetPath"`
+
+ /* ACL entries involved */
+ Entries ACLEntry `json:"entries"`
+
+ /* success/failure/pending */
+ Status TxnStatus `json:"status"`
+
+ /* execution status */
+ ExecStatus bool `json:"execStatus"`
+
+ /* set if failed */
+ ErrorMsg string `json:"errorMsg,omitempty"`
+
+ /* stdout or stderr captured */
+ Output string `json:"output"`
+
+ /* user who triggered this */
+ ExecutedBy string `json:"executedBy"`
+
+ /* execution duration in ms */
+ DurationMs int64 `json:"durationMs"`
+}
diff --git a/internal/utils/utils.go b/internal/utils/utils.go
new file mode 100644
index 0000000..c1def87
--- /dev/null
+++ b/internal/utils/utils.go
@@ -0,0 +1,66 @@
+package utils
+
+import (
+ "log"
+ "os"
+
+ "github.com/google/uuid"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "gopkg.in/natefinch/lumberjack.v2"
+
+ "github.com/PythonHacker24/linux-acl-management-backend/config"
+)
+
+var (
+ Log *zap.Logger
+)
+
+/* initializes the zap logger and provides global logging */
+func InitLogger(isProduction bool) {
+ var encoder zapcore.Encoder
+ var writeSyncer zapcore.WriteSyncer
+ var logLevel zapcore.Level
+
+ /* check if the logging level is production */
+ if isProduction {
+ encoder = zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
+ logLevel = zapcore.InfoLevel
+ writeSyncer = zapcore.AddSync(&lumberjack.Logger{
+ Filename: config.BackendConfig.Logging.File,
+ MaxSize: config.BackendConfig.Logging.MaxSize, // MB
+ MaxBackups: config.BackendConfig.Logging.MaxBackups,
+ MaxAge: config.BackendConfig.Logging.MaxAge, // days
+ Compress: config.BackendConfig.Logging.Compress,
+ })
+ } else {
+
+ /* development level logging - configured for debug */
+ /* set the encoder to console encoder */
+ cfg := zap.NewDevelopmentEncoderConfig()
+ cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
+ encoder = zapcore.NewConsoleEncoder(cfg)
+ logLevel = zapcore.DebugLevel
+ writeSyncer = zapcore.AddSync(os.Stdout)
+ }
+
+ /* create the core */
+ core := zapcore.NewCore(
+ encoder,
+ writeSyncer,
+ logLevel,
+ )
+
+ /* create the logger */
+ Log = zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
+
+ /* allow global logging with zap.L() - zap.L() is a global logger */
+ zap.ReplaceGlobals(Log)
+
+ log.Println("Initialized Zap Logger")
+}
+
+/* generate a new uuid */
+func GenerateTxnID() string {
+ return uuid.New().String()
+}
diff --git a/pkg/laclm-utils/laclm-utils.go b/pkg/laclm-utils/laclm-utils.go
deleted file mode 100644
index e69de29..0000000
diff --git a/proto/acl.pb.go b/proto/acl.pb.go
new file mode 100644
index 0000000..f4b1056
--- /dev/null
+++ b/proto/acl.pb.go
@@ -0,0 +1,288 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
+// source: proto/acl.proto
+
+package protos
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ACLEntry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ EntityType string `protobuf:"bytes,1,opt,name=entity_type,json=entityType,proto3" json:"entity_type,omitempty"` // "user", "group", "mask", "other"
+ Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` // e.g., "alice", "", etc.
+ Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"` // e.g., "rw-"
+ Action string `protobuf:"bytes,4,opt,name=action,proto3" json:"action,omitempty"` // "add", "modify", "remove"
+ IsDefault bool `protobuf:"varint,5,opt,name=is_default,json=isDefault,proto3" json:"is_default,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ACLEntry) Reset() {
+ *x = ACLEntry{}
+ mi := &file_proto_acl_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ACLEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ACLEntry) ProtoMessage() {}
+
+func (x *ACLEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_acl_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ACLEntry.ProtoReflect.Descriptor instead.
+func (*ACLEntry) Descriptor() ([]byte, []int) {
+ return file_proto_acl_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ACLEntry) GetEntityType() string {
+ if x != nil {
+ return x.EntityType
+ }
+ return ""
+}
+
+func (x *ACLEntry) GetEntity() string {
+ if x != nil {
+ return x.Entity
+ }
+ return ""
+}
+
+func (x *ACLEntry) GetPermissions() string {
+ if x != nil {
+ return x.Permissions
+ }
+ return ""
+}
+
+func (x *ACLEntry) GetAction() string {
+ if x != nil {
+ return x.Action
+ }
+ return ""
+}
+
+func (x *ACLEntry) GetIsDefault() bool {
+ if x != nil {
+ return x.IsDefault
+ }
+ return false
+}
+
+type ApplyACLRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TransactionID string `protobuf:"bytes,1,opt,name=transactionID,proto3" json:"transactionID,omitempty"`
+ TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"`
+ Entry *ACLEntry `protobuf:"bytes,3,opt,name=entry,proto3" json:"entry,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ApplyACLRequest) Reset() {
+ *x = ApplyACLRequest{}
+ mi := &file_proto_acl_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ApplyACLRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyACLRequest) ProtoMessage() {}
+
+func (x *ApplyACLRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_acl_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyACLRequest.ProtoReflect.Descriptor instead.
+func (*ApplyACLRequest) Descriptor() ([]byte, []int) {
+ return file_proto_acl_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ApplyACLRequest) GetTransactionID() string {
+ if x != nil {
+ return x.TransactionID
+ }
+ return ""
+}
+
+func (x *ApplyACLRequest) GetTargetPath() string {
+ if x != nil {
+ return x.TargetPath
+ }
+ return ""
+}
+
+func (x *ApplyACLRequest) GetEntry() *ACLEntry {
+ if x != nil {
+ return x.Entry
+ }
+ return nil
+}
+
+type ApplyACLResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ApplyACLResponse) Reset() {
+ *x = ApplyACLResponse{}
+ mi := &file_proto_acl_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ApplyACLResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApplyACLResponse) ProtoMessage() {}
+
+func (x *ApplyACLResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_acl_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApplyACLResponse.ProtoReflect.Descriptor instead.
+func (*ApplyACLResponse) Descriptor() ([]byte, []int) {
+ return file_proto_acl_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ApplyACLResponse) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+
+func (x *ApplyACLResponse) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+var File_proto_acl_proto protoreflect.FileDescriptor
+
+const file_proto_acl_proto_rawDesc = "" +
+ "\n" +
+ "\x0fproto/acl.proto\x12\x03acl\"\x9c\x01\n" +
+ "\bACLEntry\x12\x1f\n" +
+ "\ventity_type\x18\x01 \x01(\tR\n" +
+ "entityType\x12\x16\n" +
+ "\x06entity\x18\x02 \x01(\tR\x06entity\x12 \n" +
+ "\vpermissions\x18\x03 \x01(\tR\vpermissions\x12\x16\n" +
+ "\x06action\x18\x04 \x01(\tR\x06action\x12\x1d\n" +
+ "\n" +
+ "is_default\x18\x05 \x01(\bR\tisDefault\"}\n" +
+ "\x0fApplyACLRequest\x12$\n" +
+ "\rtransactionID\x18\x01 \x01(\tR\rtransactionID\x12\x1f\n" +
+ "\vtarget_path\x18\x02 \x01(\tR\n" +
+ "targetPath\x12#\n" +
+ "\x05entry\x18\x03 \x01(\v2\r.acl.ACLEntryR\x05entry\"F\n" +
+ "\x10ApplyACLResponse\x12\x18\n" +
+ "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" +
+ "\amessage\x18\x02 \x01(\tR\amessage2J\n" +
+ "\n" +
+ "ACLService\x12<\n" +
+ "\rApplyACLEntry\x12\x14.acl.ApplyACLRequest\x1a\x15.acl.ApplyACLResponseBYZWgithub.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos;protosb\x06proto3"
+
+var (
+ file_proto_acl_proto_rawDescOnce sync.Once
+ file_proto_acl_proto_rawDescData []byte
+)
+
+func file_proto_acl_proto_rawDescGZIP() []byte {
+ file_proto_acl_proto_rawDescOnce.Do(func() {
+ file_proto_acl_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_acl_proto_rawDesc), len(file_proto_acl_proto_rawDesc)))
+ })
+ return file_proto_acl_proto_rawDescData
+}
+
+var file_proto_acl_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_proto_acl_proto_goTypes = []any{
+ (*ACLEntry)(nil), // 0: acl.ACLEntry
+ (*ApplyACLRequest)(nil), // 1: acl.ApplyACLRequest
+ (*ApplyACLResponse)(nil), // 2: acl.ApplyACLResponse
+}
+var file_proto_acl_proto_depIdxs = []int32{
+ 0, // 0: acl.ApplyACLRequest.entry:type_name -> acl.ACLEntry
+ 1, // 1: acl.ACLService.ApplyACLEntry:input_type -> acl.ApplyACLRequest
+ 2, // 2: acl.ACLService.ApplyACLEntry:output_type -> acl.ApplyACLResponse
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_proto_acl_proto_init() }
+func file_proto_acl_proto_init() {
+ if File_proto_acl_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_acl_proto_rawDesc), len(file_proto_acl_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_acl_proto_goTypes,
+ DependencyIndexes: file_proto_acl_proto_depIdxs,
+ MessageInfos: file_proto_acl_proto_msgTypes,
+ }.Build()
+ File_proto_acl_proto = out.File
+ file_proto_acl_proto_goTypes = nil
+ file_proto_acl_proto_depIdxs = nil
+}
diff --git a/proto/acl.proto b/proto/acl.proto
new file mode 100644
index 0000000..6fc2aaf
--- /dev/null
+++ b/proto/acl.proto
@@ -0,0 +1,28 @@
+syntax = "proto3";
+
+package acl;
+
+option go_package = 'github.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos;protos';
+
+service ACLService {
+ rpc ApplyACLEntry (ApplyACLRequest) returns (ApplyACLResponse);
+}
+
+message ACLEntry {
+ string entity_type = 1; // "user", "group", "mask", "other"
+ string entity = 2; // e.g., "alice", "", etc.
+ string permissions = 3; // e.g., "rw-"
+ string action = 4; // "add", "modify", "remove"
+ bool is_default = 5;
+}
+
+message ApplyACLRequest {
+ string transactionID = 1;
+ string target_path = 2;
+ ACLEntry entry = 3;
+}
+
+message ApplyACLResponse {
+ bool success = 1;
+ string message = 2;
+}
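
For reference, here is a minimal sketch of a server-side implementation of ACLService, wired against the generated ACLServiceServer interface from proto/acl_grpc.pb.go below. The setfacl invocation, flag construction, and listen port are illustrative assumptions, not the repository's actual handler.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"os/exec"

	"google.golang.org/grpc"

	// Assumed import path; matches the go_package declared in proto/acl.proto.
	protos "github.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos"
)

// aclServer is a hypothetical implementation of protos.ACLServiceServer.
type aclServer struct {
	protos.UnimplementedACLServiceServer
}

// ApplyACLEntry shells out to setfacl: "-m" for add/modify, "-x" for remove.
// Error handling is kept minimal for brevity.
func (s *aclServer) ApplyACLEntry(ctx context.Context, req *protos.ApplyACLRequest) (*protos.ApplyACLResponse, error) {
	e := req.GetEntry()

	spec := fmt.Sprintf("%s:%s:%s", e.GetEntityType(), e.GetEntity(), e.GetPermissions())
	if e.GetIsDefault() {
		spec = "default:" + spec
	}

	var cmd *exec.Cmd
	if e.GetAction() == "remove" {
		// setfacl -x expects the entry without the permissions field.
		rmSpec := fmt.Sprintf("%s:%s", e.GetEntityType(), e.GetEntity())
		if e.GetIsDefault() {
			rmSpec = "default:" + rmSpec
		}
		cmd = exec.CommandContext(ctx, "setfacl", "-x", rmSpec, req.GetTargetPath())
	} else {
		cmd = exec.CommandContext(ctx, "setfacl", "-m", spec, req.GetTargetPath())
	}

	out, err := cmd.CombinedOutput()
	if err != nil {
		return &protos.ApplyACLResponse{Success: false, Message: string(out)}, nil
	}
	return &protos.ApplyACLResponse{Success: true, Message: "applied"}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // port is an assumption
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	protos.RegisterACLServiceServer(srv, &aclServer{})
	_ = srv.Serve(lis)
}
```
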
diff --git a/proto/acl_grpc.pb.go b/proto/acl_grpc.pb.go
new file mode 100644
index 0000000..dd44388
--- /dev/null
+++ b/proto/acl_grpc.pb.go
@@ -0,0 +1,121 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.29.3
+// source: proto/acl.proto
+
+package protos
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ ACLService_ApplyACLEntry_FullMethodName = "/acl.ACLService/ApplyACLEntry"
+)
+
+// ACLServiceClient is the client API for ACLService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ACLServiceClient interface {
+ ApplyACLEntry(ctx context.Context, in *ApplyACLRequest, opts ...grpc.CallOption) (*ApplyACLResponse, error)
+}
+
+type aCLServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewACLServiceClient(cc grpc.ClientConnInterface) ACLServiceClient {
+ return &aCLServiceClient{cc}
+}
+
+func (c *aCLServiceClient) ApplyACLEntry(ctx context.Context, in *ApplyACLRequest, opts ...grpc.CallOption) (*ApplyACLResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(ApplyACLResponse)
+ err := c.cc.Invoke(ctx, ACLService_ApplyACLEntry_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ACLServiceServer is the server API for ACLService service.
+// All implementations must embed UnimplementedACLServiceServer
+// for forward compatibility.
+type ACLServiceServer interface {
+ ApplyACLEntry(context.Context, *ApplyACLRequest) (*ApplyACLResponse, error)
+ mustEmbedUnimplementedACLServiceServer()
+}
+
+// UnimplementedACLServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedACLServiceServer struct{}
+
+func (UnimplementedACLServiceServer) ApplyACLEntry(context.Context, *ApplyACLRequest) (*ApplyACLResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ApplyACLEntry not implemented")
+}
+func (UnimplementedACLServiceServer) mustEmbedUnimplementedACLServiceServer() {}
+func (UnimplementedACLServiceServer) testEmbeddedByValue() {}
+
+// UnsafeACLServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ACLServiceServer will
+// result in compilation errors.
+type UnsafeACLServiceServer interface {
+ mustEmbedUnimplementedACLServiceServer()
+}
+
+func RegisterACLServiceServer(s grpc.ServiceRegistrar, srv ACLServiceServer) {
+
	// If the following call panics, it indicates UnimplementedACLServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&ACLService_ServiceDesc, srv)
+}
+
+func _ACLService_ApplyACLEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ApplyACLRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ACLServiceServer).ApplyACLEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ACLService_ApplyACLEntry_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ACLServiceServer).ApplyACLEntry(ctx, req.(*ApplyACLRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// ACLService_ServiceDesc is the grpc.ServiceDesc for ACLService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ACLService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "acl.ACLService",
+ HandlerType: (*ACLServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ApplyACLEntry",
+ Handler: _ACLService_ApplyACLEntry_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/acl.proto",
+}
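
On the client side, a hedged usage sketch of the generated ACLServiceClient: dial the server, build an ApplyACLRequest, and read the response. The address and field values are placeholders; the entry fields mirror the shape of the records in trans.json.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path; matches the go_package declared in proto/acl.proto.
	protos "github.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos"
)

func main() {
	// grpc.NewClient requires gRPC-Go v1.63+; the generated code above already
	// requires v1.64 or later.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := protos.NewACLServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.ApplyACLEntry(ctx, &protos.ApplyACLRequest{
		TransactionID: "377384ac-5150-488b-9b3b-9a87b20293ac",
		TargetPath:    "/tmp/example.txt", // placeholder path
		Entry: &protos.ACLEntry{
			EntityType:  "user",
			Entity:      "tommy",
			Permissions: "rw-",
			Action:      "modify",
			IsDefault:   false,
		},
	})
	if err != nil {
		log.Fatalf("ApplyACLEntry: %v", err)
	}
	log.Printf("success=%v message=%q", resp.GetSuccess(), resp.GetMessage())
}
```
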
diff --git a/proto/ping.pb.go b/proto/ping.pb.go
new file mode 100644
index 0000000..96b96b0
--- /dev/null
+++ b/proto/ping.pb.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
+// source: proto/ping.proto
+
+package protos
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PingRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PingRequest) Reset() {
+ *x = PingRequest{}
+ mi := &file_proto_ping_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PingRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PingRequest) ProtoMessage() {}
+
+func (x *PingRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_ping_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead.
+func (*PingRequest) Descriptor() ([]byte, []int) {
+ return file_proto_ping_proto_rawDescGZIP(), []int{0}
+}
+
+type PingResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PingResponse) Reset() {
+ *x = PingResponse{}
+ mi := &file_proto_ping_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PingResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PingResponse) ProtoMessage() {}
+
+func (x *PingResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_ping_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead.
+func (*PingResponse) Descriptor() ([]byte, []int) {
+ return file_proto_ping_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *PingResponse) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+var File_proto_ping_proto protoreflect.FileDescriptor
+
+const file_proto_ping_proto_rawDesc = "" +
+ "\n" +
+ "\x10proto/ping.proto\x12\x06protos\"\r\n" +
+ "\vPingRequest\"(\n" +
+ "\fPingResponse\x12\x18\n" +
+ "\amessage\x18\x01 \x01(\tR\amessage2@\n" +
+ "\vPingService\x121\n" +
+ "\x04Ping\x12\x13.protos.PingRequest\x1a\x14.protos.PingResponseBYZWgithub.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos;protosb\x06proto3"
+
+var (
+ file_proto_ping_proto_rawDescOnce sync.Once
+ file_proto_ping_proto_rawDescData []byte
+)
+
+func file_proto_ping_proto_rawDescGZIP() []byte {
+ file_proto_ping_proto_rawDescOnce.Do(func() {
+ file_proto_ping_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_ping_proto_rawDesc), len(file_proto_ping_proto_rawDesc)))
+ })
+ return file_proto_ping_proto_rawDescData
+}
+
+var file_proto_ping_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_proto_ping_proto_goTypes = []any{
+ (*PingRequest)(nil), // 0: protos.PingRequest
+ (*PingResponse)(nil), // 1: protos.PingResponse
+}
+var file_proto_ping_proto_depIdxs = []int32{
+ 0, // 0: protos.PingService.Ping:input_type -> protos.PingRequest
+ 1, // 1: protos.PingService.Ping:output_type -> protos.PingResponse
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_proto_ping_proto_init() }
+func file_proto_ping_proto_init() {
+ if File_proto_ping_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_ping_proto_rawDesc), len(file_proto_ping_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_ping_proto_goTypes,
+ DependencyIndexes: file_proto_ping_proto_depIdxs,
+ MessageInfos: file_proto_ping_proto_msgTypes,
+ }.Build()
+ File_proto_ping_proto = out.File
+ file_proto_ping_proto_goTypes = nil
+ file_proto_ping_proto_depIdxs = nil
+}
diff --git a/proto/ping.proto b/proto/ping.proto
new file mode 100644
index 0000000..0d2369a
--- /dev/null
+++ b/proto/ping.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+
+package protos;
+
+option go_package = 'github.com/PythonHacker24/linux-acl-management-aclapi/internal/grpcserver/protos;protos';
+
+service PingService {
+ rpc Ping (PingRequest) returns (PingResponse);
+}
+
+message PingRequest {}
+
+message PingResponse {
+ string message = 1;
+}
diff --git a/proto/ping_grpc.pb.go b/proto/ping_grpc.pb.go
new file mode 100644
index 0000000..c1c7c11
--- /dev/null
+++ b/proto/ping_grpc.pb.go
@@ -0,0 +1,121 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.29.3
+// source: proto/ping.proto
+
+package protos
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ PingService_Ping_FullMethodName = "/protos.PingService/Ping"
+)
+
+// PingServiceClient is the client API for PingService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type PingServiceClient interface {
+ Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
+}
+
+type pingServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewPingServiceClient(cc grpc.ClientConnInterface) PingServiceClient {
+ return &pingServiceClient{cc}
+}
+
+func (c *pingServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(PingResponse)
+ err := c.cc.Invoke(ctx, PingService_Ping_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// PingServiceServer is the server API for PingService service.
+// All implementations must embed UnimplementedPingServiceServer
+// for forward compatibility.
+type PingServiceServer interface {
+ Ping(context.Context, *PingRequest) (*PingResponse, error)
+ mustEmbedUnimplementedPingServiceServer()
+}
+
+// UnimplementedPingServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedPingServiceServer struct{}
+
+func (UnimplementedPingServiceServer) Ping(context.Context, *PingRequest) (*PingResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
+}
+func (UnimplementedPingServiceServer) mustEmbedUnimplementedPingServiceServer() {}
+func (UnimplementedPingServiceServer) testEmbeddedByValue() {}
+
+// UnsafePingServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to PingServiceServer will
+// result in compilation errors.
+type UnsafePingServiceServer interface {
+ mustEmbedUnimplementedPingServiceServer()
+}
+
+func RegisterPingServiceServer(s grpc.ServiceRegistrar, srv PingServiceServer) {
+
	// If the following call panics, it indicates UnimplementedPingServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&PingService_ServiceDesc, srv)
+}
+
+func _PingService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PingRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PingServiceServer).Ping(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: PingService_Ping_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PingServiceServer).Ping(ctx, req.(*PingRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// PingService_ServiceDesc is the grpc.ServiceDesc for PingService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var PingService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "protos.PingService",
+ HandlerType: (*PingServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Ping",
+ Handler: _PingService_Ping_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/ping.proto",
+}
diff --git a/sqlc.yaml b/sqlc.yaml
new file mode 100644
index 0000000..1578110
--- /dev/null
+++ b/sqlc.yaml
@@ -0,0 +1,20 @@
+version: "2"
+sql:
+ - engine: "postgresql"
+ queries: "db/queries"
+ schema: "db/schema.sql"
+ gen:
+ go:
+ package: "postgresql"
+ out: "internal/postgresql"
+ sql_package: "pgx/v5"
+ emit_json_tags: true
+ emit_prepared_queries: true
+ emit_interface: true
+ # emit_exact_table_funcs: true
+ emit_empty_slices: true
+ overrides:
+ - column: "*.id"
+ go_type: "github.com/google/uuid.UUID"
+ - column: "*.session_id"
+ go_type: "github.com/google/uuid.UUID"
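
What this configuration produces: sqlc reads db/schema.sql and the queries under db/queries and emits a Queries type (plus a Querier interface, JSON tags, and uuid.UUID for the *.id / *.session_id columns) into internal/postgresql using pgx/v5. A rough usage sketch follows, assuming a hypothetical GetSession query exists under db/queries; the query name and DSN are illustrative, not from this repo.

```go
package main

import (
	"context"
	"log"

	"github.com/google/uuid"
	"github.com/jackc/pgx/v5/pgxpool"

	// Generated by sqlc into the "out" path configured above.
	"github.com/PythonHacker24/linux-acl-management-aclapi/internal/postgresql"
)

func main() {
	ctx := context.Background()

	// DSN is a placeholder.
	pool, err := pgxpool.New(ctx, "postgres://user:pass@localhost:5432/laclm")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer pool.Close()

	// sqlc's generated constructor accepts any DBTX; *pgxpool.Pool satisfies it.
	q := postgresql.New(pool)

	// Hypothetical query ("-- name: GetSession :one" in db/queries). Because of
	// the overrides above, its id parameter is a uuid.UUID.
	id := uuid.MustParse("c9233899-7fd2-47b1-8652-1b05ad94511b")
	session, err := q.GetSession(ctx, id)
	if err != nil {
		log.Fatalf("GetSession: %v", err)
	}
	log.Printf("session: %+v", session)
}
```
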
diff --git a/trans.json b/trans.json
new file mode 100644
index 0000000..b9db2e9
--- /dev/null
+++ b/trans.json
@@ -0,0 +1,1689 @@
+{
+ "type": "transaction_update",
+ "data": {
+ "session_id": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "transactions": [
+ {
+ "id": "377384ac-5150-488b-9b3b-9a87b20293ac",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:19:55.539192133Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5047
+ },
+ {
+ "id": "208a6783-3a98-4ee5-b652-b73e88b5d619",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:19:57.611573842Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2974
+ },
+ {
+ "id": "02859fcd-10ef-463a-a46d-91d6398613dc",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:19:56.576371383Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 4010
+ },
+ {
+ "id": "888578ce-3eb0-4856-9b91-f95adb4487c6",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:19:58.641457467Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1945
+ },
+ {
+ "id": "c6411fd6-3e2e-44ef-aff3-eb15b44ed2a9",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:45.483365961Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2972
+ },
+ {
+ "id": "0aab5a00-11a1-42f5-b29f-2568f7cd8b06",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:44.454141127Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 4001
+ },
+ {
+ "id": "fd5ac53a-bc4b-4fd8-9f71-2142ea573d9a",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:46.513228128Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1943
+ },
+ {
+ "id": "200152de-e2e1-47f4-8435-a47e8cf62da8",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:43.417411335Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5039
+ },
+ {
+ "id": "de255983-d7d8-4fb3-894b-859545849bd1",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:47.543978754Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 933
+ },
+ {
+ "id": "a822b2dd-e922-4cda-95a6-00c7c3c20b36",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:48.567198004Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "df77f214-b16f-4127-a5d0-f089b248a382",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:49.597916421Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "58a46980-05fd-4244-b5ea-147fbd10e8c8",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:50.628755213Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "58451ac0-7e07-4285-9296-a1b9a13ec1fa",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:51.654990506Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "7156e3c2-b9c1-4ba2-a646-813365067d4f",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:52.687491631Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "09a307a7-9511-454b-bb13-2939b30bec87",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:55.779916799Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2958
+ },
+ {
+ "id": "c5e807fa-40cd-44d3-8c3a-397838fc0ed2",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:53.720900257Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5017
+ },
+ {
+ "id": "57811baa-ed28-44ae-bcc0-cf0c8e9b16a1",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:56.810698258Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1927
+ },
+ {
+ "id": "e5c5e8f4-dc77-4d0a-9443-949e4ce35b87",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:54.751162382Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3987
+ },
+ {
+ "id": "97c8109a-9381-4a1e-b49f-d4d984b565a7",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:57.8389078Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 911
+ },
+ {
+ "id": "2d8e1839-db99-4032-be67-7d7119ba057d",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:58.869435176Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "436acfde-6d9b-44e1-b2c1-4aae12a8392c",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:20:59.899248385Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "203be62d-345b-4669-a9b0-ccd870c0c828",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:00.924158135Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "08c61186-16c9-42bf-ba30-8111176d4fe0",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:01.953121261Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "993af8a6-a7b8-4967-9409-f04fd79bcb3d",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:02.986921969Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "95763dff-9bef-41de-a564-99f26ddba397",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:04.018682137Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5027
+ },
+ {
+ "id": "e515c3a6-3924-40ef-8ece-3c0397ee399c",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:07.106753596Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1939
+ },
+ {
+ "id": "a42f44c9-3910-43e6-94ef-09083a24cfc9",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:05.049596304Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3996
+ },
+ {
+ "id": "da5b5b54-c068-4c3e-aee1-811e8253d308",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:06.075456179Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2970
+ },
+ {
+ "id": "36f2b8ca-3c40-480d-8dff-db4bf0d5a559",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:08.134713639Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 917
+ },
+ {
+ "id": "efce8861-42e5-4edf-96e0-8a6b3a6b8495",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:09.165836708Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "4501b051-5f47-4fa8-b159-573680aa0a20",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:10.198888Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "880ecc04-dc0b-4ea1-a46c-6c0f006c039c",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:11.227483709Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "ca6228f3-6637-4f58-9b24-57870c023906",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:12.252500459Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "a3c4c962-f051-42a0-9cf7-4ef5965332cc",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:13.272417752Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "4fad4ee0-d1dc-44a2-bed2-ef4d8783c4e9",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:14.300204669Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5027
+ },
+ {
+ "id": "e1d275bf-c99f-4e23-acb9-0c238de36767",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:15.337219711Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3990
+ },
+ {
+ "id": "547a0c11-48ca-4ec9-9c68-0e6bf39b34f9",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:17.400858337Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1927
+ },
+ {
+ "id": "f30d7980-9f79-44bf-aa1a-1452c16a6986",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:16.369401045Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2958
+ },
+ {
+ "id": "2083d95b-567f-4426-9244-e88af696e223",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:18.442704379Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 892
+ },
+ {
+ "id": "771f1f4b-379b-4cf3-9716-6844c61b3811",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:19.471885963Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "51be2f14-1f98-47ab-b547-9bb85976f140",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:20.49489563Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "8116ffd5-65ec-476d-9365-ca8bdd03bb7c",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:21.542684464Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "344982e0-151f-4651-a5c8-0246d16916cd",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:22.574008006Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "d46172c1-246f-47bf-9d92-0355d956710b",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:23.603847548Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "5dcb76c3-e14a-4907-ab48-8a7a92c0b9c6",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:24.638855882Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5023
+ },
+ {
+ "id": "951d5cd6-1569-483d-895d-42dda1a5b529",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:27.735915634Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1926
+ },
+ {
+ "id": "b0a04cb0-2444-40e5-87a1-576307a751fa",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:26.70241305Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2960
+ },
+ {
+ "id": "2f375d31-ddb7-46ef-ac06-bf748ee7f31a",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:25.669617591Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3993
+ },
+ {
+ "id": "0a01e2bf-5928-4585-8b3d-e4242cdf5122",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:28.768099551Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 908
+ },
+ {
+ "id": "bd40daf0-5205-43c0-ae9c-460855665180",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:29.798931551Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "d3f74fe9-41ac-49f7-9053-d48de8abc6be",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:30.829165135Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "cbb8428c-02f2-4b8b-8af7-ea9de16c0b26",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:31.857656844Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "264fe7d1-9553-43e4-a7ee-81423e1b91b8",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:32.888017969Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "da34dc26-d569-4dac-ab7a-1eca52a1f0dc",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:33.922098553Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "3f7e91ff-10ef-493b-bb63-50592b98f1da",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:38.071134805Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1923
+ },
+ {
+ "id": "e985d6d7-07a5-46f7-b389-6bbc6ad6db85",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:35.996089762Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3999
+ },
+ {
+ "id": "2fa330b6-683c-474f-8aa6-9863b4f8577b",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:34.95962547Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5035
+ },
+ {
+ "id": "35ca8fc4-55c2-45f3-8f45-d6ba522d2e99",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:37.034021388Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2961
+ },
+ {
+ "id": "3fac3d38-7e65-450d-9be3-5d51e61c285d",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:39.109838583Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 898
+ },
+ {
+ "id": "b0a30d92-e7f9-4674-9ccd-08ac2b86eb1f",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:40.140427542Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "7727721d-4329-4923-9f40-908febba72be",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:41.175902501Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2
+ },
+ {
+ "id": "d7d3d810-5fdb-417a-8db6-98560d7002d3",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:42.222340084Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "c01b91f7-1fb0-4d6d-820a-201843397429",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:43.261692668Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "8e98f68a-c813-432d-bb67-7873385d2de7",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:44.291233085Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "50956d8c-25c8-4e58-aef5-5d35e95c1543",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:45.324548378Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5021
+ },
+ {
+ "id": "a2a7a0fa-abce-477f-8341-b60ee6ac08d5",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:47.40122317Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2946
+ },
+ {
+ "id": "123abf3b-dd11-4fa0-920f-63941688f31e",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:48.433981921Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1913
+ },
+ {
+ "id": "10d566a7-712b-4e28-ab8f-3c481b3d9c1c",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:49.471165796Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 890
+ },
+ {
+ "id": "0241c3e6-1060-4f57-a92b-e1bcefbdec73",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:21:46.36116942Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3986
+ },
+ {
+ "id": "fdfd5f4e-fdce-4b22-9352-76b66e52aec9",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:14.101428919Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3988
+ },
+ {
+ "id": "fdb8ca82-457b-4b79-854a-4f77d4f4bb62",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:13.06736871Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5022
+ },
+ {
+ "id": "0a0a5a69-f231-40ce-87a1-ed11365c42fa",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:16.153670961Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1936
+ },
+ {
+ "id": "4217356d-14f4-4d44-9e66-48aa82fa73af",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:15.121899169Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2968
+ },
+ {
+ "id": "8b45ceb9-9831-4596-87b5-c64ad2eac3a1",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:17.19300767Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 904
+ },
+ {
+ "id": "19adf19a-5a09-4d15-8aa1-f222ca426db1",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:18.224942629Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "67e9e615-2184-472e-aca0-9b2e35b557e4",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:19.268614671Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "b5798d9a-2311-4ac7-b6ba-174d8017e66e",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:20.303264713Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1
+ },
+ {
+ "id": "9ad05148-3bfa-437b-af45-40fe214c80cb",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:21.333222089Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "39bae5f3-495c-4456-a7d2-9a032068f9aa",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:22.364232006Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 0
+ },
+ {
+ "id": "dc0ba2fe-1c2b-40df-9160-61c300285299",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:23.396925298Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 5017
+ },
+ {
+ "id": "6c6f0028-b21a-4cb5-9568-308455d2dfb0",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:26.495702633Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 1919
+ },
+ {
+ "id": "2ffecd3e-9c44-4b21-9b8f-21a88976684e",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:24.427421632Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 3987
+ },
+ {
+ "id": "fcd6d82c-c126-4ab2-88dc-54b7b125d786",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:25.462212924Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 2953
+ },
+ {
+ "id": "ad10c7e1-16ef-4d0f-9861-c04383dfd1e0",
+ "sessionId": "c9233899-7fd2-47b1-8652-1b05ad94511b",
+ "timestamp": "2025-07-27T09:24:27.526038925Z",
+ "operation": "setfacl",
+ "targetPath": "/mnt/nfs-system/home/maverick/linux-acl-management-aclapi/README.md",
+ "entries": {
+ "entityType": "user",
+ "entity": "tommy",
+ "permissions": "rw-",
+ "action": "modify",
+ "isDefault": false,
+ "success": false
+ },
+ "status": "success",
+ "execStatus": false,
+ "output": "",
+ "executedBy": "Gregory House",
+ "durationMs": 899
+ }
+ ]
+ },
+ "timestamp": "2025-07-27T09:25:04.166194553Z"
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/.travis.yml b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
new file mode 100644
index 0000000..23c95fe
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+
+language: go
+
+before_script:
+ - go get -u golang.org/x/lint/golint
+
+go:
+ - 1.10.x
+ - master
+
+script:
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - go vet ./...
+ - go build -v ./...
+ - go test -v ./...
diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE
new file mode 100644
index 0000000..dc1cf39
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md
new file mode 100644
index 0000000..55cdcef
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/README.md
@@ -0,0 +1,29 @@
+# go-ntlmssp
+Golang package that provides NTLM/Negotiate authentication over HTTP
+
+[GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp) [Build Status](https://travis-ci.org/Azure/go-ntlmssp)
+
+Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx
+Implementation hints from http://davenport.sourceforge.net/ntlm.html
+
+This package only implements authentication, no key exchange or encryption. It
+only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
+This package implements NTLMv2.
+
+# Usage
+
+```
+url, user, password := "http://www.example.com/secrets", "robpike", "pw123"
+client := &http.Client{
+ Transport: ntlmssp.Negotiator{
+ RoundTripper:&http.Transport{},
+ },
+}
+
+req, _ := http.NewRequest("GET", url, nil)
+req.SetBasicAuth(user, password)
+res, _ := client.Do(req)
+```
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ntlmssp/SECURITY.md b/vendor/github.com/Azure/go-ntlmssp/SECURITY.md
new file mode 100644
index 0000000..e138ec5
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go
new file mode 100644
index 0000000..ab183db
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go
@@ -0,0 +1,187 @@
+package ntlmssp
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "strings"
+ "time"
+)
+
+type authenicateMessage struct {
+ LmChallengeResponse []byte
+ NtChallengeResponse []byte
+
+ TargetName string
+ UserName string
+
+ // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH
+ EncryptedRandomSessionKey []byte
+
+ NegotiateFlags negotiateFlags
+
+ MIC []byte
+}
+
+type authenticateMessageFields struct {
+ messageHeader
+ LmChallengeResponse varField
+ NtChallengeResponse varField
+ TargetName varField
+ UserName varField
+ Workstation varField
+ _ [8]byte
+ NegotiateFlags negotiateFlags
+}
+
+func (m authenicateMessage) MarshalBinary() ([]byte, error) {
+ if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) {
+ return nil, errors.New("Only unicode is supported")
+ }
+
+ target, user := toUnicode(m.TargetName), toUnicode(m.UserName)
+ workstation := toUnicode("")
+
+ ptr := binary.Size(&authenticateMessageFields{})
+ f := authenticateMessageFields{
+ messageHeader: newMessageHeader(3),
+ NegotiateFlags: m.NegotiateFlags,
+ LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)),
+ NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)),
+ TargetName: newVarField(&ptr, len(target)),
+ UserName: newVarField(&ptr, len(user)),
+ Workstation: newVarField(&ptr, len(workstation)),
+ }
+
+ f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION)
+
+ b := bytes.Buffer{}
+ if err := binary.Write(&b, binary.LittleEndian, &f); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&b, binary.LittleEndian, &target); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&b, binary.LittleEndian, &user); err != nil {
+ return nil, err
+ }
+ if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
+
+//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message
+//that was received from the server
+func ProcessChallenge(challengeMessageData []byte, user, password string, domainNeeded bool) ([]byte, error) {
+ if user == "" && password == "" {
+ return nil, errors.New("Anonymous authentication not supported")
+ }
+
+ var cm challengeMessage
+ if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
+ return nil, err
+ }
+
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
+ return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
+ }
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
+ return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
+ }
+
+ if !domainNeeded {
+ cm.TargetName = ""
+ }
+
+ am := authenicateMessage{
+ UserName: user,
+ TargetName: cm.TargetName,
+ NegotiateFlags: cm.NegotiateFlags,
+ }
+
+ timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
+ if timestamp == nil { // no time sent, take current time
+ ft := uint64(time.Now().UnixNano()) / 100
+ ft += 116444736000000000 // add time between unix & windows offset
+ timestamp = make([]byte, 8)
+ binary.LittleEndian.PutUint64(timestamp, ft)
+ }
+
+ clientChallenge := make([]byte, 8)
+ rand.Reader.Read(clientChallenge)
+
+ ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName)
+
+ am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)
+
+ if cm.TargetInfoRaw == nil {
+ am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge)
+ }
+ return am.MarshalBinary()
+}
+
+func ProcessChallengeWithHash(challengeMessageData []byte, user, hash string) ([]byte, error) {
+ if user == "" && hash == "" {
+ return nil, errors.New("Anonymous authentication not supported")
+ }
+
+ var cm challengeMessage
+ if err := cm.UnmarshalBinary(challengeMessageData); err != nil {
+ return nil, err
+ }
+
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) {
+ return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)")
+ }
+ if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) {
+ return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)")
+ }
+
+ am := authenicateMessage{
+ UserName: user,
+ TargetName: cm.TargetName,
+ NegotiateFlags: cm.NegotiateFlags,
+ }
+
+ timestamp := cm.TargetInfo[avIDMsvAvTimestamp]
+ if timestamp == nil { // no time sent, take current time
+ ft := uint64(time.Now().UnixNano()) / 100
+ ft += 116444736000000000 // add time between unix & windows offset
+ timestamp = make([]byte, 8)
+ binary.LittleEndian.PutUint64(timestamp, ft)
+ }
+
+ clientChallenge := make([]byte, 8)
+ rand.Reader.Read(clientChallenge)
+
+ hashParts := strings.Split(hash, ":")
+ if len(hashParts) > 1 {
+ hash = hashParts[1]
+ }
+ hashBytes, err := hex.DecodeString(hash)
+ if err != nil {
+ return nil, err
+ }
+ ntlmV2Hash := hmacMd5(hashBytes, toUnicode(strings.ToUpper(user)+cm.TargetName))
+
+ am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw)
+
+ if cm.TargetInfoRaw == nil {
+ am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash,
+ cm.ServerChallenge[:], clientChallenge)
+ }
+ return am.MarshalBinary()
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go
new file mode 100644
index 0000000..c9d30d3
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/authheader.go
@@ -0,0 +1,66 @@
+package ntlmssp
+
+import (
+ "encoding/base64"
+ "strings"
+)
+
+type authheader []string
+
+func (h authheader) IsBasic() bool {
+ for _, s := range h {
+ if strings.HasPrefix(string(s), "Basic ") {
+ return true
+ }
+ }
+ return false
+}
+
+func (h authheader) Basic() string {
+ for _, s := range h {
+ if strings.HasPrefix(string(s), "Basic ") {
+ return s
+ }
+ }
+ return ""
+}
+
+func (h authheader) IsNegotiate() bool {
+ for _, s := range h {
+ if strings.HasPrefix(string(s), "Negotiate") {
+ return true
+ }
+ }
+ return false
+}
+
+func (h authheader) IsNTLM() bool {
+ for _, s := range h {
+ if strings.HasPrefix(string(s), "NTLM") {
+ return true
+ }
+ }
+ return false
+}
+
+func (h authheader) GetData() ([]byte, error) {
+ for _, s := range h {
+ if strings.HasPrefix(string(s), "NTLM") || strings.HasPrefix(string(s), "Negotiate") || strings.HasPrefix(string(s), "Basic ") {
+ p := strings.Split(string(s), " ")
+ if len(p) < 2 {
+ return nil, nil
+ }
+ return base64.StdEncoding.DecodeString(string(p[1]))
+ }
+ }
+ return nil, nil
+}
+
+func (h authheader) GetBasicCreds() (username, password string, err error) {
+ d, err := h.GetData()
+ if err != nil {
+ return "", "", err
+ }
+ parts := strings.SplitN(string(d), ":", 2)
+ return parts[0], parts[1], nil
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go
new file mode 100644
index 0000000..196b5f1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/avids.go
@@ -0,0 +1,17 @@
+package ntlmssp
+
+type avID uint16
+
+const (
+ avIDMsvAvEOL avID = iota
+ avIDMsvAvNbComputerName
+ avIDMsvAvNbDomainName
+ avIDMsvAvDNSComputerName
+ avIDMsvAvDNSDomainName
+ avIDMsvAvDNSTreeName
+ avIDMsvAvFlags
+ avIDMsvAvTimestamp
+ avIDMsvAvSingleHost
+ avIDMsvAvTargetName
+ avIDMsvChannelBindings
+)
diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go
new file mode 100644
index 0000000..053b55e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go
@@ -0,0 +1,82 @@
+package ntlmssp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+type challengeMessageFields struct {
+ messageHeader
+ TargetName varField
+ NegotiateFlags negotiateFlags
+ ServerChallenge [8]byte
+ _ [8]byte
+ TargetInfo varField
+}
+
+func (m challengeMessageFields) IsValid() bool {
+ return m.messageHeader.IsValid() && m.MessageType == 2
+}
+
+type challengeMessage struct {
+ challengeMessageFields
+ TargetName string
+ TargetInfo map[avID][]byte
+ TargetInfoRaw []byte
+}
+
+func (m *challengeMessage) UnmarshalBinary(data []byte) error {
+ r := bytes.NewReader(data)
+ err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields)
+ if err != nil {
+ return err
+ }
+ if !m.challengeMessageFields.IsValid() {
+ return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader)
+ }
+
+ if m.challengeMessageFields.TargetName.Len > 0 {
+ m.TargetName, err = m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE))
+ if err != nil {
+ return err
+ }
+ }
+
+ if m.challengeMessageFields.TargetInfo.Len > 0 {
+ d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data)
+ m.TargetInfoRaw = d
+ if err != nil {
+ return err
+ }
+ m.TargetInfo = make(map[avID][]byte)
+ r := bytes.NewReader(d)
+ for {
+ var id avID
+ var l uint16
+ err = binary.Read(r, binary.LittleEndian, &id)
+ if err != nil {
+ return err
+ }
+ if id == avIDMsvAvEOL {
+ break
+ }
+
+ err = binary.Read(r, binary.LittleEndian, &l)
+ if err != nil {
+ return err
+ }
+ value := make([]byte, l)
+ n, err := r.Read(value)
+ if err != nil {
+ return err
+ }
+ if n != int(l) {
+ return fmt.Errorf("Expected to read %d bytes, got only %d", l, n)
+ }
+ m.TargetInfo[id] = value
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go b/vendor/github.com/Azure/go-ntlmssp/messageheader.go
new file mode 100644
index 0000000..247e284
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/messageheader.go
@@ -0,0 +1,21 @@
+package ntlmssp
+
+import (
+ "bytes"
+)
+
+var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0}
+
+type messageHeader struct {
+ Signature [8]byte
+ MessageType uint32
+}
+
+func (h messageHeader) IsValid() bool {
+ return bytes.Equal(h.Signature[:], signature[:]) &&
+ h.MessageType > 0 && h.MessageType < 4
+}
+
+func newMessageHeader(messageType uint32) messageHeader {
+ return messageHeader{signature, messageType}
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go
new file mode 100644
index 0000000..5905c02
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go
@@ -0,0 +1,52 @@
+package ntlmssp
+
+type negotiateFlags uint32
+
+const (
+ /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0
+ /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1
+ /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2
+
+ /*D*/
+ negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4
+ /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5
+ /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6
+ /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7
+
+ /*H*/
+ negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9
+
+ /*J*/
+ negotiateFlagANONYMOUS = 1 << 11
+ /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12
+ /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13
+
+ /*M*/
+ negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15
+ /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16
+ /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17
+
+ /*P*/
+ negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19
+ /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20
+
+ /*R*/
+ negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22
+ /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23
+
+ /*T*/
+ negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25
+
+ /*U*/
+ negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29
+ /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30
+ /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31
+)
+
+func (field negotiateFlags) Has(flags negotiateFlags) bool {
+ return field&flags == flags
+}
+
+func (field *negotiateFlags) Unset(flags negotiateFlags) {
+ *field = *field ^ (*field & flags)
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go
new file mode 100644
index 0000000..e466a98
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go
@@ -0,0 +1,64 @@
+package ntlmssp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "strings"
+)
+
+const expMsgBodyLen = 40
+
+type negotiateMessageFields struct {
+ messageHeader
+ NegotiateFlags negotiateFlags
+
+ Domain varField
+ Workstation varField
+
+ Version
+}
+
+var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO |
+ negotiateFlagNTLMSSPNEGOTIATE56 |
+ negotiateFlagNTLMSSPNEGOTIATE128 |
+ negotiateFlagNTLMSSPNEGOTIATEUNICODE |
+ negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY
+
+//NewNegotiateMessage creates a new NEGOTIATE message with the
+//flags that this package supports.
+func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) {
+ payloadOffset := expMsgBodyLen
+ flags := defaultFlags
+
+ if domainName != "" {
+ flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED
+ }
+
+ if workstationName != "" {
+ flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED
+ }
+
+ msg := negotiateMessageFields{
+ messageHeader: newMessageHeader(1),
+ NegotiateFlags: flags,
+ Domain: newVarField(&payloadOffset, len(domainName)),
+ Workstation: newVarField(&payloadOffset, len(workstationName)),
+ Version: DefaultVersion(),
+ }
+
+ b := bytes.Buffer{}
+ if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil {
+ return nil, err
+ }
+ if b.Len() != expMsgBodyLen {
+ return nil, errors.New("incorrect body length")
+ }
+
+ payload := strings.ToUpper(domainName + workstationName)
+ if _, err := b.WriteString(payload); err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
new file mode 100644
index 0000000..cce4955
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go
@@ -0,0 +1,151 @@
+package ntlmssp
+
+import (
+ "bytes"
+ "encoding/base64"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+)
+
+// GetDomain parses the domain name from the input username based on backslashes;
+// it also needs to account for the UPN ("user@domain") form.
+func GetDomain(user string) (string, string, bool) {
+ domain := ""
+ domainNeeded := false
+
+ if strings.Contains(user, "\\") {
+ ucomponents := strings.SplitN(user, "\\", 2)
+ domain = ucomponents[0]
+ user = ucomponents[1]
+ domainNeeded = true
+ } else if strings.Contains(user, "@") {
+ domainNeeded = false
+ } else {
+ domainNeeded = true
+ }
+ return user, domain, domainNeeded
+}
+
+//Negotiator is a http.Roundtripper decorator that automatically
+//converts basic authentication to NTLM/Negotiate authentication when appropriate.
+type Negotiator struct{ http.RoundTripper }
+
+//RoundTrip sends the request to the server, handling any authentication
+//re-sends as needed.
+func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ // Use default round tripper if not provided
+ rt := l.RoundTripper
+ if rt == nil {
+ rt = http.DefaultTransport
+ }
+ // If it is not basic auth, just round trip the request as usual
+ reqauth := authheader(req.Header.Values("Authorization"))
+ if !reqauth.IsBasic() {
+ return rt.RoundTrip(req)
+ }
+ reqauthBasic := reqauth.Basic()
+ // Save request body
+ body := bytes.Buffer{}
+ if req.Body != nil {
+ _, err = body.ReadFrom(req.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Body.Close()
+ req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+ }
+ // first try anonymous, in case the server still finds us
+ // authenticated from previous traffic
+ req.Header.Del("Authorization")
+ res, err = rt.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode != http.StatusUnauthorized {
+ return res, err
+ }
+ resauth := authheader(res.Header.Values("Www-Authenticate"))
+ if !resauth.IsNegotiate() && !resauth.IsNTLM() {
+ // Unauthorized, Negotiate not requested, let's try with basic auth
+ req.Header.Set("Authorization", string(reqauthBasic))
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+
+ res, err = rt.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode != http.StatusUnauthorized {
+ return res, err
+ }
+ resauth = authheader(res.Header.Values("Www-Authenticate"))
+ }
+
+ if resauth.IsNegotiate() || resauth.IsNTLM() {
+ // 401 with request:Basic and response:Negotiate
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+
+ // recycle credentials
+ u, p, err := reqauth.GetBasicCreds()
+ if err != nil {
+ return nil, err
+ }
+
+ // get domain from username
+ domain := ""
+ u, domain, domainNeeded := GetDomain(u)
+
+ // send negotiate
+ negotiateMessage, err := NewNegotiateMessage(domain, "")
+ if err != nil {
+ return nil, err
+ }
+ if resauth.IsNTLM() {
+ req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage))
+ } else {
+ req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage))
+ }
+
+ req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+
+ res, err = rt.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // receive challenge?
+ resauth = authheader(res.Header.Values("Www-Authenticate"))
+ challengeMessage, err := resauth.GetData()
+ if err != nil {
+ return nil, err
+ }
+ if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 {
+ // Negotiation failed, let client deal with response
+ return res, nil
+ }
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+
+ // send authenticate
+ authenticateMessage, err := ProcessChallenge(challengeMessage, u, p, domainNeeded)
+ if err != nil {
+ return nil, err
+ }
+ if resauth.IsNTLM() {
+ req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage))
+ } else {
+ req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage))
+ }
+
+ req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+
+ return rt.RoundTrip(req)
+ }
+
+ return res, err
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go
new file mode 100644
index 0000000..1e65abe
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/nlmp.go
@@ -0,0 +1,51 @@
+// Package ntlmssp provides NTLM/Negotiate authentication over HTTP
+//
+// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx,
+// implementation hints from http://davenport.sourceforge.net/ntlm.html .
+// This package only implements authentication, no key exchange or encryption. It
+// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding.
+// This package implements NTLMv2.
+package ntlmssp
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "golang.org/x/crypto/md4"
+ "strings"
+)
+
+func getNtlmV2Hash(password, username, target string) []byte {
+ return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target))
+}
+
+func getNtlmHash(password string) []byte {
+ hash := md4.New()
+ hash.Write(toUnicode(password))
+ return hash.Sum(nil)
+}
+
+func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge,
+ timestamp, targetInfo []byte) []byte {
+
+ temp := []byte{1, 1, 0, 0, 0, 0, 0, 0}
+ temp = append(temp, timestamp...)
+ temp = append(temp, clientChallenge...)
+ temp = append(temp, 0, 0, 0, 0)
+ temp = append(temp, targetInfo...)
+ temp = append(temp, 0, 0, 0, 0)
+
+ NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp)
+ return append(NTProofStr, temp...)
+}
+
+func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte {
+ return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...)
+}
+
+func hmacMd5(key []byte, data ...[]byte) []byte {
+ mac := hmac.New(md5.New, key)
+ for _, d := range data {
+ mac.Write(d)
+ }
+ return mac.Sum(nil)
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go
new file mode 100644
index 0000000..7b4f471
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/unicode.go
@@ -0,0 +1,29 @@
+package ntlmssp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "unicode/utf16"
+)
+
+// helper func's for dealing with Windows Unicode (UTF16LE)
+
+func fromUnicode(d []byte) (string, error) {
+ if len(d)%2 > 0 {
+ return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length")
+ }
+ s := make([]uint16, len(d)/2)
+ err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s)
+ if err != nil {
+ return "", err
+ }
+ return string(utf16.Decode(s)), nil
+}
+
+func toUnicode(s string) []byte {
+ uints := utf16.Encode([]rune(s))
+ b := bytes.Buffer{}
+ binary.Write(&b, binary.LittleEndian, &uints)
+ return b.Bytes()
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go
new file mode 100644
index 0000000..15f9aa1
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/varfield.go
@@ -0,0 +1,40 @@
+package ntlmssp
+
+import (
+ "errors"
+)
+
+type varField struct {
+ Len uint16
+ MaxLen uint16
+ BufferOffset uint32
+}
+
+func (f varField) ReadFrom(buffer []byte) ([]byte, error) {
+ if len(buffer) < int(f.BufferOffset+uint32(f.Len)) {
+ return nil, errors.New("Error reading data, varField extends beyond buffer")
+ }
+ return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil
+}
+
+func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) {
+ d, err := f.ReadFrom(buffer)
+ if err != nil {
+ return "", err
+ }
+ if unicode { // UTF-16LE encoding scheme
+ return fromUnicode(d)
+ }
+ // OEM encoding, close enough to ASCII, since no code page is specified
+ return string(d), err
+}
+
+func newVarField(ptr *int, fieldsize int) varField {
+ f := varField{
+ Len: uint16(fieldsize),
+ MaxLen: uint16(fieldsize),
+ BufferOffset: uint32(*ptr),
+ }
+ *ptr += fieldsize
+ return f
+}
diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go
new file mode 100644
index 0000000..6d84892
--- /dev/null
+++ b/vendor/github.com/Azure/go-ntlmssp/version.go
@@ -0,0 +1,20 @@
+package ntlmssp
+
+// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx
+type Version struct {
+ ProductMajorVersion uint8
+ ProductMinorVersion uint8
+ ProductBuild uint16
+ _ [3]byte
+ NTLMRevisionCurrent uint8
+}
+
+// DefaultVersion returns a Version with "sensible" defaults (Windows 7)
+func DefaultVersion() Version {
+ return Version{
+ ProductMajorVersion: 6,
+ ProductMinorVersion: 1,
+ ProductBuild: 7601,
+ NTLMRevisionCurrent: 15,
+ }
+}
diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE
new file mode 100644
index 0000000..6d0eb9d
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2019 TSUYUSATO Kitsune
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md
new file mode 100644
index 0000000..e9924d2
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/README.md
@@ -0,0 +1,52 @@
+# heredoc
+
+[CircleCI](https://circleci.com/gh/MakeNowJust/heredoc) [GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc)
+
+## About
+
+Package heredoc provides here-documents from raw strings while preserving relative indentation.
+
+## Install
+
+```console
+$ go get github.com/MakeNowJust/heredoc
+```
+
+## Import
+
+```go
+// usual
+import "github.com/MakeNowJust/heredoc"
+```
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/MakeNowJust/heredoc"
+)
+
+func main() {
+ fmt.Println(heredoc.Doc(`
+ Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+ sed do eiusmod tempor incididunt ut labore et dolore magna
+ aliqua. Ut enim ad minim veniam, ...
+ `))
+ // Output:
+ // Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+ // sed do eiusmod tempor incididunt ut labore et dolore magna
+ // aliqua. Ut enim ad minim veniam, ...
+ //
+}
+```
+
+## API Document
+
+ - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc)
+
+## License
+
+This software is released under the MIT License, see LICENSE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
new file mode 100644
index 0000000..1fc0469
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
@@ -0,0 +1,105 @@
+// Copyright (c) 2014-2019 TSUYUSATO Kitsune
+// This software is released under the MIT License.
+// http://opensource.org/licenses/mit-license.php
+
+// Package heredoc provides creation of here-documents from raw strings.
+//
+// Golang supports raw-string syntax.
+//
+// doc := `
+// Foo
+// Bar
+// `
+//
+// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to
+//
+// "\n\tFoo\n\tBar\n"
+//
+// I don't want this!
+//
+// However this problem is solved by package heredoc.
+//
+// doc := heredoc.Doc(`
+// Foo
+// Bar
+// `)
+//
+// Is equivalent to
+//
+// "Foo\nBar\n"
+package heredoc
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// Doc returns un-indented string as here-document.
+func Doc(raw string) string {
+ skipFirstLine := false
+ if len(raw) > 0 && raw[0] == '\n' {
+ raw = raw[1:]
+ } else {
+ skipFirstLine = true
+ }
+
+ lines := strings.Split(raw, "\n")
+
+ minIndentSize := getMinIndent(lines, skipFirstLine)
+ lines = removeIndentation(lines, minIndentSize, skipFirstLine)
+
+ return strings.Join(lines, "\n")
+}
+
+// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
+func getMinIndent(lines []string, skipFirstLine bool) int {
+ minIndentSize := maxInt
+
+ for i, line := range lines {
+ if i == 0 && skipFirstLine {
+ continue
+ }
+
+ indentSize := 0
+ for _, r := range []rune(line) {
+ if unicode.IsSpace(r) {
+ indentSize += 1
+ } else {
+ break
+ }
+ }
+
+ if len(line) == indentSize {
+ if i == len(lines)-1 && indentSize < minIndentSize {
+ lines[i] = ""
+ }
+ } else if indentSize < minIndentSize {
+ minIndentSize = indentSize
+ }
+ }
+ return minIndentSize
+}
+
+// removeIndentation removes n characters from the front of each line in lines.
+// Skips first line if skipFirstLine is true, skips empty lines.
+func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
+ for i, line := range lines {
+ if i == 0 && skipFirstLine {
+ continue
+ }
+
+ if len(lines[i]) >= n {
+ lines[i] = line[n:]
+ }
+ }
+ return lines
+}
+
+// Docf returns unindented and formatted string as here-document.
+// Formatting is done as for fmt.Printf().
+func Docf(raw string, args ...interface{}) string {
+ return fmt.Sprintf(Doc(raw), args...)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 0000000..24b5306
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 0000000..33c8830
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,74 @@
+# xxhash
+
+[Go Reference](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[Test](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
+
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
+
+```
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 0000000..94b9c44
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 0000000..78bddf1
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,243 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
+
+// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest with a zero seed.
+func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
+ var d Digest
+ d.ResetWithSeed(seed)
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
+func (d *Digest) Reset() {
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(memleft, b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ c := copy(memleft, b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[c:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 0000000..3e8b132
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 0000000..7e3145a
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
new file mode 100644
index 0000000..78f95f2
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -0,0 +1,15 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 0000000..118e49e
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := primes[0] + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -primes[0]
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
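The pure-Go fallback above mirrors the assembly paths, so usage is identical regardless of which build is selected. A minimal sketch of both entry points (New, Write, and Sum64 are the calls referenced in the comment at the top of Sum64 above):

    package main

    import (
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    func main() {
        data := []byte("hello, xxhash")

        // One-shot hashing of a byte slice.
        fmt.Printf("%016x\n", xxhash.Sum64(data))

        // Streaming the same bytes through a Digest yields the same value.
        d := xxhash.New()
        d.Write(data[:5])
        d.Write(data[5:])
        fmt.Printf("%016x\n", d.Sum64())
    }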
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 0000000..05f5e7d
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,16 @@
+//go:build appengine
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 0000000..cf9d42a
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,58 @@
+//go:build !appengine
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+ // d.Write always returns len(s), nil.
+ // Ignoring the return output and returning these fixed values buys a
+ // savings of 6 in the inliner's cost model.
+ return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+ s string
+ cap int
+}
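To make the point of the unsafe conversion concrete: Sum64String (and Digest.WriteString) must produce exactly the same digest as hashing a copied []byte; they only avoid the allocation. A small check, assuming the import path used above:

    package main

    import (
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    func main() {
        s := "no copy required"

        // Identical digests; Sum64String just skips the []byte(s) copy.
        fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true

        d := xxhash.New()
        d.WriteString(s)
        fmt.Println(d.Sum64() == xxhash.Sum64String(s)) // true
    }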
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
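What the flag surgery above buys in practice: spew can hand values such as unexported struct fields to their Stringer/error methods, which ordinary reflection refuses to Interface(). A sketch of the effect under the default Config (the ipAddr/server types are made up for illustration):

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    // ipAddr implements fmt.Stringer.
    type ipAddr [4]byte

    func (ip ipAddr) String() string {
        return fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3])
    }

    type server struct {
        addr ipAddr // unexported field: reflect alone cannot Interface() it
    }

    func main() {
        // With the unsafe bypass compiled in, Dump still invokes String()
        // on the unexported field; with "-tags safe" it falls back to
        // dumping the raw array contents.
        spew.Dump(server{addr: ipAddr{10, 0, 0, 1}})
    }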
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+// mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
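Because ConfigState is just a value and every top-level function has a method equivalent, a local instance tuned for deterministic output is a common pattern in tests. A sketch using only the options documented above:

    package main

    import "github.com/davecgh/go-spew/spew"

    func main() {
        // Sorted map keys and no pointer addresses or capacities, so dumps of
        // equal values compare as equal strings.
        cs := spew.ConfigState{
            Indent:                  "\t",
            SortKeys:                true,
            DisablePointerAddresses: true,
            DisableCapacities:       true,
        }

        got := map[string]int{"b": 2, "a": 1}
        want := map[string]int{"a": 1, "b": 2}

        if cs.Sdump(got) != cs.Sdump(want) {
            panic("dumps differ")
        }
    }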
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
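A short usage sketch of the formatter with the four supported verb combinations; the point type is illustrative:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type point struct{ X, Y int }
	p := &point{X: 1, Y: 2}

	// Wrap the value once and reuse it with the standard fmt verbs.
	f := spew.NewFormatter(p)
	fmt.Printf("%v\n", f)   // most compact form
	fmt.Printf("%+v\n", f)  // adds pointer addresses
	fmt.Printf("%#v\n", f)  // adds types
	fmt.Printf("%#+v\n", f) // adds types and pointer addresses
}
```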
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
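Since every wrapper above routes its arguments through convertArgs, the two calls in this small illustrative sketch should produce identical output:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"retries": 3}

	// spew.Printf wraps each argument with NewFormatter before handing it
	// to the standard fmt package, so these two lines are equivalent.
	spew.Printf("config: %+v\n", cfg)
	fmt.Printf("config: %+v\n", spew.NewFormatter(cfg))
}
```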
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
new file mode 100644
index 0000000..22080f7
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 0000000..7a6f820
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+ nodes map[string]int
+ nstr []string
+ nhash []uint64
+ hash Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+ r := &Rendezvous{
+ nodes: make(map[string]int, len(nodes)),
+ nstr: make([]string, len(nodes)),
+ nhash: make([]uint64, len(nodes)),
+ hash: hash,
+ }
+
+ for i, n := range nodes {
+ r.nodes[n] = i
+ r.nstr[i] = n
+ r.nhash[i] = hash(n)
+ }
+
+ return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+ // short-circuit if we're empty
+ if len(r.nodes) == 0 {
+ return ""
+ }
+
+ khash := r.hash(k)
+
+ var midx int
+ var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+ for i, nhash := range r.nhash[1:] {
+ if h := xorshiftMult64(khash ^ nhash); h > mhash {
+ midx = i + 1
+ mhash = h
+ }
+ }
+
+ return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+ r.nodes[node] = len(r.nstr)
+ r.nstr = append(r.nstr, node)
+ r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+ // find index of node to remove
+ nidx := r.nodes[node]
+
+	// remove from the slices by swapping the last entry into its place
+	l := len(r.nstr) - 1
+	r.nstr[nidx] = r.nstr[l]
+	r.nstr = r.nstr[:l]
+	r.nhash[nidx] = r.nhash[l]
+	r.nhash = r.nhash[:l]
+
+	// update the map; if the removed node was the last entry, nothing moved
+	delete(r.nodes, node)
+	if nidx < l {
+		r.nodes[r.nstr[nidx]] = nidx
+	}
+}
+
+func xorshiftMult64(x uint64) uint64 {
+ x ^= x >> 12 // a
+ x ^= x << 25 // b
+ x ^= x >> 27 // c
+ return x * 2685821657736338717
+}
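A brief sketch of how this rendezvous (highest-random-weight) table is typically used; the FNV-based hasher is an illustrative choice, not something the package prescribes:

```go
package main

import (
	"fmt"
	"hash/fnv"

	"github.com/dgryski/go-rendezvous"
)

func main() {
	hasher := func(s string) uint64 {
		h := fnv.New64a()
		h.Write([]byte(s))
		return h.Sum64()
	}

	r := rendezvous.New([]string{"node-a", "node-b", "node-c"}, hasher)

	// Every process using the same node set and hasher maps a key to the
	// same node, which is what consistent client-side sharding relies on.
	fmt.Println(r.Lookup("user:42"))

	r.Add("node-d")
	fmt.Println(r.Lookup("user:42"))
}
```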
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE
new file mode 100644
index 0000000..23f9425
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
+Portions copyright (c) 2015-2016 go-asn1-ber Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/README.md b/vendor/github.com/go-asn1-ber/asn1-ber/README.md
new file mode 100644
index 0000000..e3a9560
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/README.md
@@ -0,0 +1,24 @@
+[GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1) [Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber)
+
+
+ASN1 BER Encoding / Decoding Library for the GO programming language.
+---------------------------------------------------------------------
+
+Required libraries:
+ None
+
+Working:
+ Very basic encoding / decoding needed for LDAP protocol
+
+Tests Implemented:
+ A few
+
+TODO:
+ Fix all encoding / decoding to conform to ASN1 BER spec
+ Implement Tests / Benchmarks
+
+---
+
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/ber.go b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go
new file mode 100644
index 0000000..f27229e
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/ber.go
@@ -0,0 +1,866 @@
+package ber
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for
+// no limit.
+var MaxPacketLengthBytes int64 = math.MaxInt32
+
+type Packet struct {
+ Identifier
+ Value interface{}
+ ByteValue []byte
+ Data *bytes.Buffer
+ Children []*Packet
+ Description string
+}
+
+type Identifier struct {
+ ClassType Class
+ TagType Type
+ Tag Tag
+}
+
+type Tag uint64
+
+const (
+ TagEOC Tag = 0x00
+ TagBoolean Tag = 0x01
+ TagInteger Tag = 0x02
+ TagBitString Tag = 0x03
+ TagOctetString Tag = 0x04
+ TagNULL Tag = 0x05
+ TagObjectIdentifier Tag = 0x06
+ TagObjectDescriptor Tag = 0x07
+ TagExternal Tag = 0x08
+ TagRealFloat Tag = 0x09
+ TagEnumerated Tag = 0x0a
+ TagEmbeddedPDV Tag = 0x0b
+ TagUTF8String Tag = 0x0c
+ TagRelativeOID Tag = 0x0d
+ TagSequence Tag = 0x10
+ TagSet Tag = 0x11
+ TagNumericString Tag = 0x12
+ TagPrintableString Tag = 0x13
+ TagT61String Tag = 0x14
+ TagVideotexString Tag = 0x15
+ TagIA5String Tag = 0x16
+ TagUTCTime Tag = 0x17
+ TagGeneralizedTime Tag = 0x18
+ TagGraphicString Tag = 0x19
+ TagVisibleString Tag = 0x1a
+ TagGeneralString Tag = 0x1b
+ TagUniversalString Tag = 0x1c
+ TagCharacterString Tag = 0x1d
+ TagBMPString Tag = 0x1e
+ TagBitmask Tag = 0x1f // xxx11111b
+
+ // HighTag indicates the start of a high-tag byte sequence
+ HighTag Tag = 0x1f // xxx11111b
+ // HighTagContinueBitmask indicates the high-tag byte sequence should continue
+ HighTagContinueBitmask Tag = 0x80 // 10000000b
+ // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
+ HighTagValueBitmask Tag = 0x7f // 01111111b
+)
+
+const (
+ // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
+ LengthLongFormBitmask = 0x80
+ // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
+ LengthValueBitmask = 0x7f
+
+ // LengthIndefinite is returned from readLength to indicate an indefinite length
+ LengthIndefinite = -1
+)
+
+var tagMap = map[Tag]string{
+ TagEOC: "EOC (End-of-Content)",
+ TagBoolean: "Boolean",
+ TagInteger: "Integer",
+ TagBitString: "Bit String",
+ TagOctetString: "Octet String",
+ TagNULL: "NULL",
+ TagObjectIdentifier: "Object Identifier",
+ TagObjectDescriptor: "Object Descriptor",
+ TagExternal: "External",
+ TagRealFloat: "Real (float)",
+ TagEnumerated: "Enumerated",
+ TagEmbeddedPDV: "Embedded PDV",
+ TagUTF8String: "UTF8 String",
+ TagRelativeOID: "Relative-OID",
+ TagSequence: "Sequence and Sequence of",
+ TagSet: "Set and Set OF",
+ TagNumericString: "Numeric String",
+ TagPrintableString: "Printable String",
+ TagT61String: "T61 String",
+ TagVideotexString: "Videotex String",
+ TagIA5String: "IA5 String",
+ TagUTCTime: "UTC Time",
+ TagGeneralizedTime: "Generalized Time",
+ TagGraphicString: "Graphic String",
+ TagVisibleString: "Visible String",
+ TagGeneralString: "General String",
+ TagUniversalString: "Universal String",
+ TagCharacterString: "Character String",
+ TagBMPString: "BMP String",
+}
+
+type Class uint8
+
+const (
+ ClassUniversal Class = 0 // 00xxxxxxb
+ ClassApplication Class = 64 // 01xxxxxxb
+ ClassContext Class = 128 // 10xxxxxxb
+ ClassPrivate Class = 192 // 11xxxxxxb
+ ClassBitmask Class = 192 // 11xxxxxxb
+)
+
+var ClassMap = map[Class]string{
+ ClassUniversal: "Universal",
+ ClassApplication: "Application",
+ ClassContext: "Context",
+ ClassPrivate: "Private",
+}
+
+type Type uint8
+
+const (
+ TypePrimitive Type = 0 // xx0xxxxxb
+ TypeConstructed Type = 32 // xx1xxxxxb
+ TypeBitmask Type = 32 // xx1xxxxxb
+)
+
+var TypeMap = map[Type]string{
+ TypePrimitive: "Primitive",
+ TypeConstructed: "Constructed",
+}
+
+var Debug = false
+
+func PrintBytes(out io.Writer, buf []byte, indent string) {
+ dataLines := make([]string, (len(buf)/30)+1)
+ numLines := make([]string, (len(buf)/30)+1)
+
+ for i, b := range buf {
+ dataLines[i/30] += fmt.Sprintf("%02x ", b)
+ numLines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
+ }
+
+ for i := 0; i < len(dataLines); i++ {
+ _, _ = out.Write([]byte(indent + dataLines[i] + "\n"))
+ _, _ = out.Write([]byte(indent + numLines[i] + "\n\n"))
+ }
+}
+
+func WritePacket(out io.Writer, p *Packet) {
+ printPacket(out, p, 0, false)
+}
+
+func PrintPacket(p *Packet) {
+ printPacket(os.Stdout, p, 0, false)
+}
+
+// DescribePacket returns a string describing the packet's content. It is not
+// recursive; if the packet is a sequence, use printPacket() or walk the
+// children yourself.
+func DescribePacket(p *Packet) string {
+
+ classStr := ClassMap[p.ClassType]
+
+ tagTypeStr := TypeMap[p.TagType]
+
+ tagStr := fmt.Sprintf("0x%02X", p.Tag)
+
+ if p.ClassType == ClassUniversal {
+ tagStr = tagMap[p.Tag]
+ }
+
+ value := fmt.Sprint(p.Value)
+ description := ""
+
+ if p.Description != "" {
+ description = p.Description + ": "
+ }
+
+ return fmt.Sprintf("%s(%s, %s, %s) Len=%d %q", description, classStr, tagTypeStr, tagStr, p.Data.Len(), value)
+}
+
+func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
+ indentStr := ""
+
+ for len(indentStr) != indent {
+ indentStr += " "
+ }
+
+ _, _ = fmt.Fprintf(out, "%s%s\n", indentStr, DescribePacket(p))
+
+ if printBytes {
+ PrintBytes(out, p.Bytes(), indentStr)
+ }
+
+ for _, child := range p.Children {
+ printPacket(out, child, indent+1, printBytes)
+ }
+}
+
+// ReadPacket reads a single Packet from the reader.
+func ReadPacket(reader io.Reader) (*Packet, error) {
+ p, _, err := readPacket(reader)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func DecodeString(data []byte) string {
+ return string(data)
+}
+
+func ParseInt64(bytes []byte) (ret int64, err error) {
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = fmt.Errorf("integer too large")
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+func encodeInteger(i int64) []byte {
+ n := int64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = byte(i >> uint((n-1)*8))
+ j++
+ }
+
+ return out
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
+
+// DecodePacket decodes the given bytes into a single Packet.
+// If a decode error is encountered, nil is returned.
+func DecodePacket(data []byte) *Packet {
+ p, _, _ := readPacket(bytes.NewBuffer(data))
+
+ return p
+}
+
+// DecodePacketErr decodes the given bytes into a single Packet.
+// If a decode error is encountered, nil and the error are returned.
+func DecodePacketErr(data []byte) (*Packet, error) {
+ p, _, err := readPacket(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// readPacket reads a single Packet from the reader, returning the number of bytes read.
+func readPacket(reader io.Reader) (*Packet, int, error) {
+ identifier, length, read, err := readHeader(reader)
+ if err != nil {
+ return nil, read, err
+ }
+
+ p := &Packet{
+ Identifier: identifier,
+ }
+
+ p.Data = new(bytes.Buffer)
+ p.Children = make([]*Packet, 0, 2)
+ p.Value = nil
+
+ if p.TagType == TypeConstructed {
+ // TODO: if universal, ensure tag type is allowed to be constructed
+
+ // Track how much content we've read
+ contentRead := 0
+ for {
+ if length != LengthIndefinite {
+ // End if we've read what we've been told to
+ if contentRead == length {
+ break
+ }
+ // Detect if a packet boundary didn't fall on the expected length
+ if contentRead > length {
+ return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
+ }
+ }
+
+ // Read the next packet
+ child, r, err := readPacket(reader)
+ if err != nil {
+ return nil, read, unexpectedEOF(err)
+ }
+ contentRead += r
+ read += r
+
+			// Test if this is the EOC marker for our packet
+ if isEOCPacket(child) {
+ if length == LengthIndefinite {
+ break
+ }
+ return nil, read, errors.New("eoc child not allowed with definite length")
+ }
+
+ // Append and continue
+ p.AppendChild(child)
+ }
+ return p, read, nil
+ }
+
+ if length == LengthIndefinite {
+ return nil, read, errors.New("indefinite length used with primitive type")
+ }
+
+ // Read definite-length content
+ if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes {
+ return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes)
+ }
+
+ var content []byte
+ if length > 0 {
+ // Read the content and limit it to the parsed length.
+ // If the content is less than the length, we return an EOF error.
+ content, err = ioutil.ReadAll(io.LimitReader(reader, int64(length)))
+ if err == nil && len(content) < int(length) {
+ err = io.EOF
+ }
+ if err != nil {
+ return nil, read, unexpectedEOF(err)
+ }
+ read += len(content)
+ } else {
+ // If length == 0, we set the ByteValue to an empty slice
+ content = make([]byte, 0)
+ }
+
+ if p.ClassType == ClassUniversal {
+ p.Data.Write(content)
+ p.ByteValue = content
+
+ switch p.Tag {
+ case TagEOC:
+ case TagBoolean:
+ val, _ := ParseInt64(content)
+
+ p.Value = val != 0
+ case TagInteger:
+ p.Value, _ = ParseInt64(content)
+ case TagBitString:
+ case TagOctetString:
+			// the actual string encoding is not known here
+			// (e.g. for LDAP, the content is already a UTF-8 encoded
+			// string). Return the data without further processing.
+ p.Value = DecodeString(content)
+ case TagNULL:
+ case TagObjectIdentifier:
+ oid, err := parseObjectIdentifier(content)
+ if err == nil {
+ p.Value = OIDToString(oid)
+ }
+ case TagObjectDescriptor:
+ case TagExternal:
+ case TagRealFloat:
+ p.Value, err = ParseReal(content)
+ case TagEnumerated:
+ p.Value, _ = ParseInt64(content)
+ case TagEmbeddedPDV:
+ case TagUTF8String:
+ val := DecodeString(content)
+ if !utf8.Valid([]byte(val)) {
+ err = errors.New("invalid UTF-8 string")
+ } else {
+ p.Value = val
+ }
+ case TagRelativeOID:
+ oid, err := parseRelativeObjectIdentifier(content)
+ if err == nil {
+ p.Value = OIDToString(oid)
+ }
+ case TagSequence:
+ case TagSet:
+ case TagNumericString:
+ case TagPrintableString:
+ val := DecodeString(content)
+ if err = isPrintableString(val); err == nil {
+ p.Value = val
+ }
+ case TagT61String:
+ case TagVideotexString:
+ case TagIA5String:
+ val := DecodeString(content)
+ for i, c := range val {
+ if c >= 0x7F {
+ err = fmt.Errorf("invalid character for IA5String at pos %d: %c", i, c)
+ break
+ }
+ }
+ if err == nil {
+ p.Value = val
+ }
+ case TagUTCTime:
+ case TagGeneralizedTime:
+ p.Value, err = ParseGeneralizedTime(content)
+ case TagGraphicString:
+ case TagVisibleString:
+ case TagGeneralString:
+ case TagUniversalString:
+ case TagCharacterString:
+ case TagBMPString:
+ }
+ } else {
+ p.Data.Write(content)
+ }
+
+ return p, read, err
+}
+
+func isPrintableString(val string) error {
+ for i, c := range val {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c >= '0' && c <= '9':
+ default:
+ switch c {
+ case '\'', '(', ')', '+', ',', '-', '.', '=', '/', ':', '?', ' ':
+ default:
+ return fmt.Errorf("invalid character in position %d", i)
+ }
+ }
+ }
+ return nil
+}
+
+func (p *Packet) Bytes() []byte {
+ var out bytes.Buffer
+
+ out.Write(encodeIdentifier(p.Identifier))
+ out.Write(encodeLength(p.Data.Len()))
+ out.Write(p.Data.Bytes())
+
+ return out.Bytes()
+}
+
+func (p *Packet) AppendChild(child *Packet) {
+ p.Data.Write(child.Bytes())
+ p.Children = append(p.Children, child)
+}
+
+func Encode(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
+ p := new(Packet)
+
+ p.ClassType = classType
+ p.TagType = tagType
+ p.Tag = tag
+ p.Data = new(bytes.Buffer)
+
+ p.Children = make([]*Packet, 0, 2)
+
+ p.Value = value
+ p.Description = description
+
+ if value != nil {
+ v := reflect.ValueOf(value)
+
+ if classType == ClassUniversal {
+ switch tag {
+ case TagOctetString:
+ sv, ok := v.Interface().(string)
+
+ if ok {
+ p.Data.Write([]byte(sv))
+ }
+ case TagEnumerated:
+ bv, ok := v.Interface().([]byte)
+ if ok {
+ p.Data.Write(bv)
+ }
+ case TagEmbeddedPDV:
+ bv, ok := v.Interface().([]byte)
+ if ok {
+ p.Data.Write(bv)
+ }
+ }
+ } else if classType == ClassContext {
+ switch tag {
+ case TagEnumerated:
+ bv, ok := v.Interface().([]byte)
+ if ok {
+ p.Data.Write(bv)
+ }
+ case TagEmbeddedPDV:
+ bv, ok := v.Interface().([]byte)
+ if ok {
+ p.Data.Write(bv)
+ }
+ }
+ }
+ }
+ return p
+}
+
+func NewSequence(description string) *Packet {
+ return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, description)
+}
+
+func NewBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet {
+ intValue := int64(0)
+
+ if value {
+ intValue = 1
+ }
+
+ p := Encode(classType, tagType, tag, nil, description)
+
+ p.Value = value
+ p.Data.Write(encodeInteger(intValue))
+
+ return p
+}
+
+// NewLDAPBoolean returns a RFC 4511-compliant Boolean packet.
+func NewLDAPBoolean(classType Class, tagType Type, tag Tag, value bool, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ p.Value = value
+ if value {
+ p.Data.Write([]byte{255})
+ } else {
+ p.Data.Write([]byte{0})
+ }
+
+ return p
+}
+
+func NewInteger(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ p.Value = value
+ switch v := value.(type) {
+ case int:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int64:
+ p.Data.Write(encodeInteger(v))
+ case uint64:
+ // TODO : check range or add encodeUInt...
+ p.Data.Write(encodeInteger(int64(v)))
+ case int32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int8:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint8:
+ p.Data.Write(encodeInteger(int64(v)))
+ default:
+ // TODO : add support for big.Int ?
+ panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
+ }
+
+ return p
+}
+
+func NewString(classType Class, tagType Type, tag Tag, value, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ p.Value = value
+ p.Data.Write([]byte(value))
+
+ return p
+}
+
+func NewGeneralizedTime(classType Class, tagType Type, tag Tag, value time.Time, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+ var s string
+ if value.Nanosecond() != 0 {
+ s = value.Format(`20060102150405.000000000Z`)
+ } else {
+ s = value.Format(`20060102150405Z`)
+ }
+ p.Value = s
+ p.Data.Write([]byte(s))
+ return p
+}
+
+func NewReal(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ switch v := value.(type) {
+ case float64:
+ p.Data.Write(encodeFloat(v))
+ case float32:
+ p.Data.Write(encodeFloat(float64(v)))
+ default:
+ panic(fmt.Sprintf("Invalid type %T, expected float{64|32}", v))
+ }
+ return p
+}
+
+func NewOID(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ switch v := value.(type) {
+ case string:
+ encoded, err := encodeOID(v)
+ if err != nil {
+ fmt.Printf("failed writing %v", err)
+ return nil
+ }
+ p.Value = v
+ p.Data.Write(encoded)
+ // TODO: support []int already ?
+ default:
+		panic(fmt.Sprintf("Invalid type %T, expected string", v))
+ }
+ return p
+}
+
+func NewRelativeOID(classType Class, tagType Type, tag Tag, value interface{}, description string) *Packet {
+ p := Encode(classType, tagType, tag, nil, description)
+
+ switch v := value.(type) {
+ case string:
+ encoded, err := encodeRelativeOID(v)
+ if err != nil {
+ fmt.Printf("failed writing %v", err)
+ return nil
+ }
+ p.Value = v
+ p.Data.Write(encoded)
+ // TODO: support []int already ?
+ default:
+		panic(fmt.Sprintf("Invalid type %T, expected string", v))
+ }
+ return p
+}
+
+// encodeOID takes a string representation of an OID and returns its DER-encoded byte slice along with any error.
+func encodeOID(oidString string) ([]byte, error) {
+ // Convert the string representation to an asn1.ObjectIdentifier
+ parts := strings.Split(oidString, ".")
+ oid := make([]int, len(parts))
+ for i, part := range parts {
+ var val int
+ if _, err := fmt.Sscanf(part, "%d", &val); err != nil {
+ return nil, fmt.Errorf("invalid OID part '%s': %w", part, err)
+ }
+ oid[i] = val
+ }
+ if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+ panic(fmt.Sprintf("invalid object identifier % d", oid)) // TODO: not elegant
+ }
+ encoded := make([]byte, 0)
+
+ encoded = appendBase128Int(encoded[:0], int64(oid[0]*40+oid[1]))
+ for i := 2; i < len(oid); i++ {
+ encoded = appendBase128Int(encoded, int64(oid[i]))
+ }
+
+ return encoded, nil
+}
+
+func encodeRelativeOID(oidString string) ([]byte, error) {
+ parts := strings.Split(oidString, ".")
+ oid := make([]int, len(parts))
+ for i, part := range parts {
+ var val int
+ if _, err := fmt.Sscanf(part, "%d", &val); err != nil {
+ return nil, fmt.Errorf("invalid RELATIVE OID part '%s': %w", part, err)
+ }
+ oid[i] = val
+ }
+
+ encoded := make([]byte, 0)
+
+ for i := 0; i < len(oid); i++ {
+ encoded = appendBase128Int(encoded, int64(oid[i]))
+ }
+
+ return encoded, nil
+}
+
+func appendBase128Int(dst []byte, n int64) []byte {
+ l := base128IntLength(n)
+
+ for i := l - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+
+ dst = append(dst, o)
+ }
+
+ return dst
+}
+func base128IntLength(n int64) int {
+ if n == 0 {
+ return 1
+ }
+
+ l := 0
+ for i := n; i > 0; i >>= 7 {
+ l++
+ }
+
+ return l
+}
+
+func OIDToString(oi []int) string {
+ var s strings.Builder
+ s.Grow(32)
+
+ buf := make([]byte, 0, 19)
+ for i, v := range oi {
+ if i > 0 {
+ s.WriteByte('.')
+ }
+ s.Write(strconv.AppendInt(buf, int64(v), 10))
+ }
+
+ return s.String()
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+ if len(bytes) == 0 {
+ err = fmt.Errorf("zero length OBJECT IDENTIFIER")
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ v, offset, err := parseBase128Int(bytes, 0)
+ if err != nil {
+ return
+ }
+ if v < 80 {
+ s[0] = v / 40
+ s[1] = v % 40
+ } else {
+ s[0] = 2
+ s[1] = v - 80
+ }
+
+ i := 2
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
+
+func parseRelativeObjectIdentifier(bytes []byte) (s []int, err error) {
+ if len(bytes) == 0 {
+ err = fmt.Errorf("zero length RELATIVE OBJECT IDENTIFIER")
+ return
+ }
+
+ s = make([]int, len(bytes)+1)
+
+ var v, offset int
+ i := 0
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+ offset = initOffset
+ var ret64 int64
+ for shifted := 0; offset < len(bytes); shifted++ {
+ // 5 * 7 bits per byte == 35 bits of data
+ // Thus the representation is either non-minimal or too large for an int32
+ if shifted == 5 {
+ err = fmt.Errorf("base 128 integer too large")
+ return
+ }
+ ret64 <<= 7
+ b := bytes[offset]
+ // integers should be minimally encoded, so the leading octet should
+ // never be 0x80
+ if shifted == 0 && b == 0x80 {
+ err = fmt.Errorf("integer is not minimally encoded")
+ return
+ }
+ ret64 |= int64(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ ret = int(ret64)
+ // Ensure that the returned value fits in an int on all platforms
+ if ret64 > math.MaxInt32 {
+ err = fmt.Errorf("base 128 integer too large")
+ }
+ return
+ }
+ }
+ err = fmt.Errorf("truncated base 128 integer")
+ return
+}
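A minimal round trip using the exported helpers above; the values are chosen purely for illustration:

```go
package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Build a SEQUENCE containing an INTEGER and an OCTET STRING.
	seq := ber.NewSequence("example sequence")
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(42), "answer"))
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))

	// Serialize to BER bytes, then decode back into a packet tree.
	decoded, err := ber.DecodePacketErr(seq.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Children[0].Value) // 42
	fmt.Println(decoded.Children[1].Value) // hello
}
```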
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go
new file mode 100644
index 0000000..20b500f
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/content_int.go
@@ -0,0 +1,25 @@
+package ber
+
+func encodeUnsignedInteger(i uint64) []byte {
+ n := uint64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = byte(i >> uint((n-1)*8))
+ j++
+ }
+
+ return out
+}
+
+func uint64Length(i uint64) (numBytes int) {
+ numBytes = 1
+
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go
new file mode 100644
index 0000000..51215f0
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/generalizedTime.go
@@ -0,0 +1,105 @@
+package ber
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "time"
+)
+
+// ErrInvalidTimeFormat is returned when the generalizedTime string was not correct.
+var ErrInvalidTimeFormat = errors.New("invalid time format")
+
+var zeroTime = time.Time{}
+
+// ParseGeneralizedTime parses a string value and, if it conforms to the
+// GeneralizedTime[^0] format, returns a time.Time for that value.
+//
+// [^0]: https://www.itu.int/rec/T-REC-X.690-201508-I/en Section 11.7
+func ParseGeneralizedTime(v []byte) (time.Time, error) {
+ var format string
+ var fract time.Duration
+
+ str := []byte(DecodeString(v))
+ tzIndex := bytes.IndexAny(str, "Z+-")
+ if tzIndex < 0 {
+ return zeroTime, ErrInvalidTimeFormat
+ }
+
+ dot := bytes.IndexAny(str, ".,")
+ switch dot {
+ case -1:
+ switch tzIndex {
+ case 10:
+ format = `2006010215Z`
+ case 12:
+ format = `200601021504Z`
+ case 14:
+ format = `20060102150405Z`
+ default:
+ return zeroTime, ErrInvalidTimeFormat
+ }
+
+ case 10, 12:
+ if tzIndex < dot {
+ return zeroTime, ErrInvalidTimeFormat
+ }
+ // a "," is also allowed, but would not be parsed by time.Parse():
+ str[dot] = '.'
+
+		// If <minute> is omitted, then <fraction> represents a fraction of an
+		// hour; otherwise, if <second> and <leap-second> are omitted, then
+		// <fraction> represents a fraction of a minute; otherwise, <fraction>
+		// represents a fraction of a second.
+
+ // parse as float from dot to timezone
+ f, err := strconv.ParseFloat(string(str[dot:tzIndex]), 64)
+ if err != nil {
+ return zeroTime, fmt.Errorf("failed to parse float: %s", err)
+ }
+ // ...and strip that part
+ str = append(str[:dot], str[tzIndex:]...)
+ tzIndex = dot
+
+ if dot == 10 {
+ fract = time.Duration(int64(f * float64(time.Hour)))
+ format = `2006010215Z`
+ } else {
+ fract = time.Duration(int64(f * float64(time.Minute)))
+ format = `200601021504Z`
+ }
+
+ case 14:
+ if tzIndex < dot {
+ return zeroTime, ErrInvalidTimeFormat
+ }
+ str[dot] = '.'
+ // no need for fractional seconds, time.Parse() handles that
+ format = `20060102150405Z`
+
+ default:
+ return zeroTime, ErrInvalidTimeFormat
+ }
+
+ l := len(str)
+ switch l - tzIndex {
+ case 1:
+ if str[l-1] != 'Z' {
+ return zeroTime, ErrInvalidTimeFormat
+ }
+ case 3:
+ format += `0700`
+ str = append(str, []byte("00")...)
+ case 5:
+ format += `0700`
+ default:
+ return zeroTime, ErrInvalidTimeFormat
+ }
+
+ t, err := time.Parse(format, string(str))
+ if err != nil {
+ return zeroTime, fmt.Errorf("%s: %s", ErrInvalidTimeFormat, err)
+ }
+ return t.Add(fract), nil
+}
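A few of the accepted forms, as a quick sketch (the timestamps are illustrative):

```go
package main

import (
	"fmt"

	ber "github.com/go-asn1-ber/asn1-ber"
)

func main() {
	// Full precision with fractional seconds and the UTC designator.
	t1, err := ber.ParseGeneralizedTime([]byte("20240102150405.123Z"))
	fmt.Println(t1, err)

	// Minutes omitted: the fraction is interpreted as a fraction of an hour.
	t2, err := ber.ParseGeneralizedTime([]byte("2024010215.5Z"))
	fmt.Println(t2, err)

	// Explicit numeric offset instead of "Z".
	t3, err := ber.ParseGeneralizedTime([]byte("20060102150405+0700"))
	fmt.Println(t3, err)
}
```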
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/header.go b/vendor/github.com/go-asn1-ber/asn1-ber/header.go
new file mode 100644
index 0000000..7dfa6b9
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/header.go
@@ -0,0 +1,38 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
+ var (
+ c, l int
+ i Identifier
+ )
+
+ if i, c, err = readIdentifier(reader); err != nil {
+ return Identifier{}, 0, read, err
+ }
+ identifier = i
+ read += c
+
+ if l, c, err = readLength(reader); err != nil {
+ return Identifier{}, 0, read, err
+ }
+ length = l
+ read += c
+
+ // Validate length type with identifier (x.600, 8.1.3.2.a)
+ if length == LengthIndefinite && identifier.TagType == TypePrimitive {
+ return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
+ }
+
+ if length < LengthIndefinite {
+ err = fmt.Errorf("length cannot be less than %d", LengthIndefinite)
+ return
+ }
+
+ return identifier, length, read, nil
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
new file mode 100644
index 0000000..c501d81
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/identifier.go
@@ -0,0 +1,112 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+func readIdentifier(reader io.Reader) (Identifier, int, error) {
+ identifier := Identifier{}
+ read := 0
+
+ // identifier byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading identifier byte: %v\n", err)
+ }
+ return Identifier{}, read, err
+ }
+ read++
+
+ identifier.ClassType = Class(b) & ClassBitmask
+ identifier.TagType = Type(b) & TypeBitmask
+
+ if tag := Tag(b) & TagBitmask; tag != HighTag {
+ // short-form tag
+ identifier.Tag = tag
+ return identifier, read, nil
+ }
+
+ // high-tag-number tag
+ tagBytes := 0
+ for {
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
+ }
+ return Identifier{}, read, unexpectedEOF(err)
+ }
+ tagBytes++
+ read++
+
+ // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
+ identifier.Tag <<= 7
+ identifier.Tag |= Tag(b) & HighTagValueBitmask
+
+ // First byte may not be all zeros (x.690, 8.1.2.4.2.c)
+ if tagBytes == 1 && identifier.Tag == 0 {
+ return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
+ }
+ // Overflow of int64
+ // TODO: support big int tags?
+ if tagBytes > 9 {
+ return Identifier{}, read, errors.New("high-tag-number tag overflow")
+ }
+
+ // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
+ if Tag(b)&HighTagContinueBitmask == 0 {
+ break
+ }
+ }
+
+ return identifier, read, nil
+}
+
+func encodeIdentifier(identifier Identifier) []byte {
+ b := []byte{0x0}
+ b[0] |= byte(identifier.ClassType)
+ b[0] |= byte(identifier.TagType)
+
+ if identifier.Tag < HighTag {
+ // Short-form
+ b[0] |= byte(identifier.Tag)
+ } else {
+ // high-tag-number
+ b[0] |= byte(HighTag)
+
+ tag := identifier.Tag
+
+ b = append(b, encodeHighTag(tag)...)
+ }
+ return b
+}
+
+func encodeHighTag(tag Tag) []byte {
+ // set cap=4 to hopefully avoid additional allocations
+ b := make([]byte, 0, 4)
+ for tag != 0 {
+ // t := last 7 bits of tag (HighTagValueBitmask = 0x7F)
+ t := tag & HighTagValueBitmask
+
+ // right shift tag 7 to remove what was just pulled off
+ tag >>= 7
+
+ // if b already has entries this entry needs a continuation bit (0x80)
+ if len(b) != 0 {
+ t |= HighTagContinueBitmask
+ }
+
+ b = append(b, byte(t))
+ }
+ // reverse
+ // since bits were pulled off 'tag' from low to high, the byte slice is in reverse order.
+ // example: tag = 0xFF results in {0x7F, 0x01 + 0x80 (continuation bit)}
+ // this needs to be reversed into 0x81 0x7F
+ for i, j := 0, len(b)-1; i < len(b)/2; i++ {
+ b[i], b[j-i] = b[j-i], b[i]
+ }
+ return b
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/length.go b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
new file mode 100644
index 0000000..2c81cc3
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/length.go
@@ -0,0 +1,81 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+func readLength(reader io.Reader) (length int, read int, err error) {
+ // length byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading length byte: %v\n", err)
+ }
+ return 0, 0, unexpectedEOF(err)
+ }
+ read++
+
+ switch {
+ case b == 0xFF:
+ // Invalid 0xFF (x.690, 8.1.3.5.c)
+ return 0, read, errors.New("invalid length byte 0xff")
+
+ case b == LengthLongFormBitmask:
+ // Indefinite form, we have to decode packets until we encounter an EOC packet (x.690, 8.1.3.6)
+ length = LengthIndefinite
+
+ case b&LengthLongFormBitmask == 0:
+ // Short definite form, extract the length from the bottom 7 bits (x.690, 8.1.3.4)
+ length = int(b) & LengthValueBitmask
+
+ case b&LengthLongFormBitmask != 0:
+ // Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.690, 8.1.3.5.b)
+ lengthBytes := int(b) & LengthValueBitmask
+ // Protect against overflow
+ // TODO: support big int length?
+ if lengthBytes > 8 {
+ return 0, read, errors.New("long-form length overflow")
+ }
+
+ // Accumulate into a 64-bit variable
+ var length64 int64
+ for i := 0; i < lengthBytes; i++ {
+ b, err = readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
+ }
+ return 0, read, unexpectedEOF(err)
+ }
+ read++
+
+ // x.690, 8.1.3.5
+ length64 <<= 8
+ length64 |= int64(b)
+ }
+
+ // Cast to a platform-specific integer
+ length = int(length64)
+ // Ensure we didn't overflow
+ if int64(length) != length64 {
+ return 0, read, errors.New("long-form length overflow")
+ }
+
+ default:
+ return 0, read, errors.New("invalid length byte")
+ }
+
+ return length, read, nil
+}
+
+func encodeLength(length int) []byte {
+ lengthBytes := encodeUnsignedInteger(uint64(length))
+ if length > 127 || len(lengthBytes) > 1 {
+ longFormBytes := []byte{LengthLongFormBitmask | byte(len(lengthBytes))}
+ longFormBytes = append(longFormBytes, lengthBytes...)
+ lengthBytes = longFormBytes
+ }
+ return lengthBytes
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/real.go b/vendor/github.com/go-asn1-ber/asn1-ber/real.go
new file mode 100644
index 0000000..9f637a5
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/real.go
@@ -0,0 +1,163 @@
+package ber
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+)
+
+func encodeFloat(v float64) []byte {
+ switch {
+ case math.IsInf(v, 1):
+ return []byte{0x40}
+ case math.IsInf(v, -1):
+ return []byte{0x41}
+ case math.IsNaN(v):
+ return []byte{0x42}
+ case v == 0.0:
+ if math.Signbit(v) {
+ return []byte{0x43}
+ }
+ return []byte{}
+ default:
+ // we take the easy part ;-)
+ value := []byte(strconv.FormatFloat(v, 'G', -1, 64))
+ var ret []byte
+ if bytes.Contains(value, []byte{'E'}) {
+ ret = []byte{0x03}
+ } else {
+ ret = []byte{0x02}
+ }
+ ret = append(ret, value...)
+ return ret
+ }
+}
+
+func ParseReal(v []byte) (val float64, err error) {
+ if len(v) == 0 {
+ return 0.0, nil
+ }
+ switch {
+ case v[0]&0x80 == 0x80:
+ val, err = parseBinaryFloat(v)
+ case v[0]&0xC0 == 0x40:
+ val, err = parseSpecialFloat(v)
+ case v[0]&0xC0 == 0x0:
+ val, err = parseDecimalFloat(v)
+ default:
+ return 0.0, fmt.Errorf("invalid info block")
+ }
+ if err != nil {
+ return 0.0, err
+ }
+
+ if val == 0.0 && !math.Signbit(val) {
+ return 0.0, errors.New("REAL value +0 must be encoded with zero-length value block")
+ }
+ return val, nil
+}
+
+func parseBinaryFloat(v []byte) (float64, error) {
+ var info byte
+ var buf []byte
+
+ info, v = v[0], v[1:]
+
+ var base int
+ switch info & 0x30 {
+ case 0x00:
+ base = 2
+ case 0x10:
+ base = 8
+ case 0x20:
+ base = 16
+ case 0x30:
+ return 0.0, errors.New("bits 6 and 5 of information octet for REAL are equal to 11")
+ }
+
+ scale := uint((info & 0x0c) >> 2)
+
+ var expLen int
+ switch info & 0x03 {
+ case 0x00:
+ expLen = 1
+ case 0x01:
+ expLen = 2
+ case 0x02:
+ expLen = 3
+ case 0x03:
+ if len(v) < 2 {
+ return 0.0, errors.New("invalid data")
+ }
+ expLen = int(v[0])
+ if expLen > 8 {
+ return 0.0, errors.New("too big value of exponent")
+ }
+ v = v[1:]
+ }
+ if expLen > len(v) {
+ return 0.0, errors.New("too big value of exponent")
+ }
+ buf, v = v[:expLen], v[expLen:]
+ exponent, err := ParseInt64(buf)
+ if err != nil {
+ return 0.0, err
+ }
+
+ if len(v) > 8 {
+ return 0.0, errors.New("too big value of mantissa")
+ }
+
+ mant, err := ParseInt64(v)
+ if err != nil {
+ return 0.0, err
+ }
+ mantissa := mant << scale
+
+ if info&0x40 == 0x40 {
+ mantissa = -mantissa
+ }
+
+ return float64(mantissa) * math.Pow(float64(base), float64(exponent)), nil
+}
+
+func parseDecimalFloat(v []byte) (val float64, err error) {
+ switch v[0] & 0x3F {
+ case 0x01: // NR form 1
+ var iVal int64
+ iVal, err = strconv.ParseInt(strings.TrimLeft(string(v[1:]), " "), 10, 64)
+ val = float64(iVal)
+ case 0x02, 0x03: // NR form 2, 3
+ val, err = strconv.ParseFloat(strings.Replace(strings.TrimLeft(string(v[1:]), " "), ",", ".", -1), 64)
+ default:
+ err = errors.New("incorrect NR form")
+ }
+ if err != nil {
+ return 0.0, err
+ }
+
+ if val == 0.0 && math.Signbit(val) {
+ return 0.0, errors.New("REAL value -0 must be encoded as a special value")
+ }
+ return val, nil
+}
+
+func parseSpecialFloat(v []byte) (float64, error) {
+ if len(v) != 1 {
+ return 0.0, errors.New(`encoding of "special value" must not contain exponent and mantissa`)
+ }
+ switch v[0] {
+ case 0x40:
+ return math.Inf(1), nil
+ case 0x41:
+ return math.Inf(-1), nil
+ case 0x42:
+ return math.NaN(), nil
+ case 0x43:
+ return math.Copysign(0, -1), nil
+ }
+ return 0.0, errors.New(`encoding of "special value" not from ASN.1 standard`)
+}
diff --git a/vendor/github.com/go-asn1-ber/asn1-ber/util.go b/vendor/github.com/go-asn1-ber/asn1-ber/util.go
new file mode 100644
index 0000000..da45e9f
--- /dev/null
+++ b/vendor/github.com/go-asn1-ber/asn1-ber/util.go
@@ -0,0 +1,28 @@
+package ber
+
+import "io"
+
+func readByte(reader io.Reader) (byte, error) {
+ bytes := make([]byte, 1)
+ _, err := io.ReadFull(reader, bytes)
+ if err != nil {
+ return 0, err
+ }
+ return bytes[0], nil
+}
+
+func unexpectedEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+func isEOCPacket(p *Packet) bool {
+ return p != nil &&
+ p.Tag == TagEOC &&
+ p.ClassType == ClassUniversal &&
+ p.TagType == TypePrimitive &&
+ len(p.ByteValue) == 0 &&
+ len(p.Children) == 0
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/LICENSE b/vendor/github.com/go-ldap/ldap/v3/LICENSE
new file mode 100644
index 0000000..ef07293
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
+Portions copyright (c) 2015-2024 go-ldap Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-ldap/ldap/v3/add.go b/vendor/github.com/go-ldap/ldap/v3/add.go
new file mode 100644
index 0000000..ab32b0b
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/add.go
@@ -0,0 +1,89 @@
+package ldap
+
+import (
+ "fmt"
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// Attribute represents an LDAP attribute
+type Attribute struct {
+ // Type is the name of the LDAP attribute
+ Type string
+ // Vals are the LDAP attribute values
+ Vals []string
+}
+
+func (a *Attribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range a.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// AddRequest represents an LDAP AddRequest operation
+type AddRequest struct {
+ // DN identifies the entry being added
+ DN string
+ // Attributes list the attributes of the new entry
+ Attributes []Attribute
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+func (req *AddRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
+ attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range req.Attributes {
+ attributes.AppendChild(attribute.encode())
+ }
+ pkt.AppendChild(attributes)
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// Attribute adds an attribute with the given type and values
+func (req *AddRequest) Attribute(attrType string, attrVals []string) {
+ req.Attributes = append(req.Attributes, Attribute{Type: attrType, Vals: attrVals})
+}
+
+// NewAddRequest returns an AddRequest for the given DN, with no attributes
+func NewAddRequest(dn string, controls []Control) *AddRequest {
+ return &AddRequest{
+ DN: dn,
+ Controls: controls,
+ }
+}
+
+// Add performs the given AddRequest
+func (l *Conn) Add(addRequest *AddRequest) error {
+ msgCtx, err := l.doRequest(addRequest)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return err
+ }
+
+ if packet.Children[1].Tag == ApplicationAddResponse {
+ err := GetLDAPError(packet)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
+ }
+ return nil
+}
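As a usage sketch only: application code drives the AddRequest API above (NewAddRequest, Attribute, Conn.Add) roughly as follows. The package name, DN, and attribute values are hypothetical placeholders, and the *ldap.Conn is assumed to be already connected and bound.

package example

import ldap "github.com/go-ldap/ldap/v3"

// addEntry sketches a call into the vendored Add API; the DN and attribute
// values are placeholders, and l is assumed to be an open, bound connection.
func addEntry(l *ldap.Conn) error {
	req := ldap.NewAddRequest("cn=app-user,ou=people,dc=example,dc=org", nil)
	req.Attribute("objectClass", []string{"inetOrgPerson"})
	req.Attribute("cn", []string{"app-user"})
	req.Attribute("sn", []string{"user"})
	return l.Add(req)
}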
diff --git a/vendor/github.com/go-ldap/ldap/v3/bind.go b/vendor/github.com/go-ldap/ldap/v3/bind.go
new file mode 100644
index 0000000..6cfd37e
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/bind.go
@@ -0,0 +1,817 @@
+package ldap
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "encoding/hex"
+ enchex "encoding/hex"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "strings"
+ "unicode/utf16"
+
+ "github.com/Azure/go-ntlmssp"
+ ber "github.com/go-asn1-ber/asn1-ber"
+ "golang.org/x/crypto/md4" //nolint:staticcheck
+)
+
+// SimpleBindRequest represents a username/password bind operation
+type SimpleBindRequest struct {
+ // Username is the name of the Directory object that the client wishes to bind as
+ Username string
+ // Password is the credentials to bind with
+ Password string
+ // Controls are optional controls to send with the bind request
+ Controls []Control
+ // AllowEmptyPassword sets whether the client allows binding with an empty password
+ // (normally used for unauthenticated bind).
+ AllowEmptyPassword bool
+}
+
+// SimpleBindResult contains the response from the server
+type SimpleBindResult struct {
+ Controls []Control
+}
+
+// NewSimpleBindRequest returns a bind request
+func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
+ return &SimpleBindRequest{
+ Username: username,
+ Password: password,
+ Controls: controls,
+ AllowEmptyPassword: false,
+ }
+}
+
+func (req *SimpleBindRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Username, "User Name"))
+ pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.Password, "Password"))
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// SimpleBind performs the simple bind operation defined in the given request
+func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
+ if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword {
+ return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
+ }
+
+ msgCtx, err := l.doRequest(simpleBindRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &SimpleBindResult{
+ Controls: make([]Control, 0),
+ }
+
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ decodedChild, decodeErr := DecodeControl(child)
+ if decodeErr != nil {
+ return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
+ }
+ result.Controls = append(result.Controls, decodedChild)
+ }
+ }
+
+ err = GetLDAPError(packet)
+ return result, err
+}
+
+// Bind performs a bind with the given username and password.
+//
+// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method
+// for that.
+func (l *Conn) Bind(username, password string) error {
+ req := &SimpleBindRequest{
+ Username: username,
+ Password: password,
+ AllowEmptyPassword: false,
+ }
+ _, err := l.SimpleBind(req)
+ return err
+}
+
+// UnauthenticatedBind performs an unauthenticated bind.
+//
+// A username may be provided for tracing (e.g. logging) purposes only, but it is normally not
+// authenticated or otherwise validated by the LDAP server.
+//
+// See https://tools.ietf.org/html/rfc4513#section-5.1.2 .
+// See https://tools.ietf.org/html/rfc4513#section-6.3.1 .
+func (l *Conn) UnauthenticatedBind(username string) error {
+ req := &SimpleBindRequest{
+ Username: username,
+ Password: "",
+ AllowEmptyPassword: true,
+ }
+ _, err := l.SimpleBind(req)
+ return err
+}
+
+// DigestMD5BindRequest represents a digest-md5 bind operation
+type DigestMD5BindRequest struct {
+ Host string
+ // Username is the name of the Directory object that the client wishes to bind as
+ Username string
+ // Password is the credentials to bind with
+ Password string
+ // Controls are optional controls to send with the bind request
+ Controls []Control
+}
+
+func (req *DigestMD5BindRequest) appendTo(envelope *ber.Packet) error {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
+ auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech"))
+ request.AppendChild(auth)
+ envelope.AppendChild(request)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+ return nil
+}
+
+// DigestMD5BindResult contains the response from the server
+type DigestMD5BindResult struct {
+ Controls []Control
+}
+
+// MD5Bind performs a digest-md5 bind with the given host, username and password.
+func (l *Conn) MD5Bind(host, username, password string) error {
+ req := &DigestMD5BindRequest{
+ Host: host,
+ Username: username,
+ Password: password,
+ }
+ _, err := l.DigestMD5Bind(req)
+ return err
+}
+
+// DigestMD5Bind performs the digest-md5 bind operation defined in the given request
+func (l *Conn) DigestMD5Bind(digestMD5BindRequest *DigestMD5BindRequest) (*DigestMD5BindResult, error) {
+ if digestMD5BindRequest.Password == "" {
+ return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
+ }
+
+ msgCtx, err := l.doRequest(digestMD5BindRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if l.Debug {
+ if err = addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ result := &DigestMD5BindResult{
+ Controls: make([]Control, 0),
+ }
+ var params map[string]string
+ if len(packet.Children) == 2 {
+ if len(packet.Children[1].Children) == 4 {
+ child := packet.Children[1].Children[0]
+ if child.Tag != ber.TagEnumerated {
+ return result, GetLDAPError(packet)
+ }
+ if child.Value.(int64) != 14 {
+ return result, GetLDAPError(packet)
+ }
+ child = packet.Children[1].Children[3]
+ if child.Tag != ber.TagObjectDescriptor {
+ return result, GetLDAPError(packet)
+ }
+ if child.Data == nil {
+ return result, GetLDAPError(packet)
+ }
+ data, _ := ioutil.ReadAll(child.Data)
+ params, err = parseParams(string(data))
+ if err != nil {
+ return result, fmt.Errorf("parsing digest-challenge: %s", err)
+ }
+ }
+ }
+
+ if len(params) > 0 {
+ resp := computeResponse(
+ params,
+ "ldap/"+strings.ToLower(digestMD5BindRequest.Host),
+ digestMD5BindRequest.Username,
+ digestMD5BindRequest.Password,
+ )
+ packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
+ auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "DIGEST-MD5", "SASL Mech"))
+ auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, resp, "Credentials"))
+ request.AppendChild(auth)
+ packet.AppendChild(request)
+ msgCtx, err = l.sendMessage(packet)
+ if err != nil {
+ return nil, fmt.Errorf("send message: %s", err)
+ }
+ defer l.finishMessage(msgCtx)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, fmt.Errorf("read packet: %s", err)
+ }
+
+ if len(packet.Children) == 2 {
+ response := packet.Children[1]
+ if response == nil {
+ return result, GetLDAPError(packet)
+ }
+ if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+ if ber.Type(response.Children[0].Tag) == ber.Type(ber.TagInteger) || ber.Type(response.Children[0].Tag) == ber.Type(ber.TagEnumerated) {
+ resultCode := uint16(response.Children[0].Value.(int64))
+ if resultCode == 14 {
+ msgCtx, err := l.doRequest(digestMD5BindRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, fmt.Errorf("read packet: %s", err)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ err = GetLDAPError(packet)
+ return result, err
+}
+
+func parseParams(str string) (map[string]string, error) {
+ m := make(map[string]string)
+ var key, value string
+ var state int
+ for i := 0; i <= len(str); i++ {
+ switch state {
+ case 0: // reading key
+ if i == len(str) {
+ return nil, fmt.Errorf("syntax error on %d", i)
+ }
+ if str[i] != '=' {
+ key += string(str[i])
+ continue
+ }
+ state = 1
+ case 1: // reading value
+ if i == len(str) {
+ m[key] = value
+ break
+ }
+ switch str[i] {
+ case ',':
+ m[key] = value
+ state = 0
+ key = ""
+ value = ""
+ case '"':
+ if value != "" {
+ return nil, fmt.Errorf("syntax error on %d", i)
+ }
+ state = 2
+ default:
+ value += string(str[i])
+ }
+ case 2: // inside quotes
+ if i == len(str) {
+ return nil, fmt.Errorf("syntax error on %d", i)
+ }
+ if str[i] != '"' {
+ value += string(str[i])
+ } else {
+ state = 1
+ }
+ }
+ }
+ return m, nil
+}
+
+func computeResponse(params map[string]string, uri, username, password string) string {
+ nc := "00000001"
+ qop := "auth"
+ cnonce := enchex.EncodeToString(randomBytes(16))
+ x := username + ":" + params["realm"] + ":" + password
+ y := md5Hash([]byte(x))
+
+ a1 := bytes.NewBuffer(y)
+ a1.WriteString(":" + params["nonce"] + ":" + cnonce)
+ if len(params["authzid"]) > 0 {
+ a1.WriteString(":" + params["authzid"])
+ }
+ a2 := bytes.NewBuffer([]byte("AUTHENTICATE"))
+ a2.WriteString(":" + uri)
+ ha1 := enchex.EncodeToString(md5Hash(a1.Bytes()))
+ ha2 := enchex.EncodeToString(md5Hash(a2.Bytes()))
+
+ kd := ha1
+ kd += ":" + params["nonce"]
+ kd += ":" + nc
+ kd += ":" + cnonce
+ kd += ":" + qop
+ kd += ":" + ha2
+ resp := enchex.EncodeToString(md5Hash([]byte(kd)))
+ return fmt.Sprintf(
+ `username="%s",realm="%s",nonce="%s",cnonce="%s",nc=00000001,qop=%s,digest-uri="%s",response=%s`,
+ username,
+ params["realm"],
+ params["nonce"],
+ cnonce,
+ qop,
+ uri,
+ resp,
+ )
+}
+
+func md5Hash(b []byte) []byte {
+ hasher := md5.New()
+ hasher.Write(b)
+ return hasher.Sum(nil)
+}
+
+func randomBytes(len int) []byte {
+ b := make([]byte, len)
+ for i := 0; i < len; i++ {
+ b[i] = byte(rand.Intn(256))
+ }
+ return b
+}
+
+var externalBindRequest = requestFunc(func(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ saslAuth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
+ saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "EXTERNAL", "SASL Mech"))
+ saslAuth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "SASL Cred"))
+
+ pkt.AppendChild(saslAuth)
+
+ envelope.AppendChild(pkt)
+
+ return nil
+})
+
+// ExternalBind performs SASL/EXTERNAL authentication.
+//
+// Use ldap.DialURL("ldapi://") to connect to the Unix socket before ExternalBind.
+//
+// See https://tools.ietf.org/html/rfc4422#appendix-A
+func (l *Conn) ExternalBind() error {
+ msgCtx, err := l.doRequest(externalBindRequest)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return err
+ }
+
+ return GetLDAPError(packet)
+}
+
+// NTLMBind performs an NTLMSSP bind leveraging https://github.com/Azure/go-ntlmssp
+
+// NTLMBindRequest represents an NTLMSSP bind operation
+type NTLMBindRequest struct {
+ // Domain is the AD Domain to authenticate to. If not specified, it will be grabbed from the NTLMSSP Challenge
+ Domain string
+ // Username is the name of the Directory object that the client wishes to bind as
+ Username string
+ // Password is the credentials to bind with
+ Password string
+ // AllowEmptyPassword sets whether the client allows binding with an empty password
+ // (normally used for unauthenticated bind).
+ AllowEmptyPassword bool
+ // Hash is the hex NTLM hash to bind with. Password or hash must be provided
+ Hash string
+ // Controls are optional controls to send with the bind request
+ Controls []Control
+ // Negotiator allows specifying a custom NTLM negotiator.
+ Negotiator NTLMNegotiator
+}
+
+// NTLMNegotiator is an abstraction of an NTLM implementation that produces and
+// processes NTLM binary tokens.
+type NTLMNegotiator interface {
+ Negotiate(domain string, workstation string) ([]byte, error)
+ ChallengeResponse(challenge []byte, username string, hash string) ([]byte, error)
+}
+
+func (req *NTLMBindRequest) appendTo(envelope *ber.Packet) (err error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ var negMessage []byte
+
+ // generate an NTLMSSP Negotiation message for the specified domain (it can be blank)
+ switch {
+ case req.Negotiator == nil:
+ negMessage, err = ntlmssp.NewNegotiateMessage(req.Domain, "")
+ if err != nil {
+ return fmt.Errorf("create NTLM negotiate message: %s", err)
+ }
+ default:
+ negMessage, err = req.Negotiator.Negotiate(req.Domain, "")
+ if err != nil {
+ return fmt.Errorf("create NTLM negotiate message with custom negotiator: %s", err)
+ }
+ }
+
+ // append the generated NTLMSSP message as a TagEnumerated BER value
+ auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEnumerated, negMessage, "authentication")
+ request.AppendChild(auth)
+ envelope.AppendChild(request)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+ return nil
+}
+
+// NTLMBindResult contains the response from the server
+type NTLMBindResult struct {
+ Controls []Control
+}
+
+// NTLMBind performs an NTLMSSP Bind with the given domain, username and password
+func (l *Conn) NTLMBind(domain, username, password string) error {
+ req := &NTLMBindRequest{
+ Domain: domain,
+ Username: username,
+ Password: password,
+ }
+ _, err := l.NTLMChallengeBind(req)
+ return err
+}
+
+// NTLMUnauthenticatedBind performs a bind with an empty password.
+//
+// A username is required. The anonymous bind is not (yet) supported by the go-ntlmssp library (https://github.com/Azure/go-ntlmssp/blob/819c794454d067543bc61d29f61fef4b3c3df62c/authenticate_message.go#L87)
+//
+// See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/b38c36ed-2804-4868-a9ff-8dd3182128e4 part 3.2.5.1.2
+func (l *Conn) NTLMUnauthenticatedBind(domain, username string) error {
+ req := &NTLMBindRequest{
+ Domain: domain,
+ Username: username,
+ Password: "",
+ AllowEmptyPassword: true,
+ }
+ _, err := l.NTLMChallengeBind(req)
+ return err
+}
+
+// NTLMBindWithHash performs an NTLM Bind with an NTLM hash instead of plaintext password (pass-the-hash)
+func (l *Conn) NTLMBindWithHash(domain, username, hash string) error {
+ req := &NTLMBindRequest{
+ Domain: domain,
+ Username: username,
+ Hash: hash,
+ }
+ _, err := l.NTLMChallengeBind(req)
+ return err
+}
+
+// NTLMChallengeBind performs the NTLMSSP bind operation defined in the given request
+func (l *Conn) NTLMChallengeBind(ntlmBindRequest *NTLMBindRequest) (*NTLMBindResult, error) {
+ if !ntlmBindRequest.AllowEmptyPassword && ntlmBindRequest.Password == "" && ntlmBindRequest.Hash == "" {
+ return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client"))
+ }
+
+ msgCtx, err := l.doRequest(ntlmBindRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if l.Debug {
+ if err = addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+ result := &NTLMBindResult{
+ Controls: make([]Control, 0),
+ }
+ var ntlmsspChallenge []byte
+
+ // now find the NTLM Response Message
+ if len(packet.Children) == 2 {
+ if len(packet.Children[1].Children) == 3 {
+ child := packet.Children[1].Children[1]
+ ntlmsspChallenge = child.ByteValue
+ // Check to make sure we got the right message. It will always start with NTLMSSP
+ if len(ntlmsspChallenge) < 7 || !bytes.Equal(ntlmsspChallenge[:7], []byte("NTLMSSP")) {
+ return result, GetLDAPError(packet)
+ }
+ l.Debug.Printf("%d: found ntlmssp challenge", msgCtx.id)
+ }
+ }
+ if ntlmsspChallenge != nil {
+ var err error
+ var responseMessage []byte
+
+ switch {
+ case ntlmBindRequest.Hash == "" && ntlmBindRequest.Password == "" && !ntlmBindRequest.AllowEmptyPassword:
+ err = fmt.Errorf("need a password or hash to generate reply")
+ case ntlmBindRequest.Negotiator == nil && ntlmBindRequest.Hash != "":
+ responseMessage, err = ntlmssp.ProcessChallengeWithHash(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Hash)
+ case ntlmBindRequest.Negotiator == nil && (ntlmBindRequest.Password != "" || ntlmBindRequest.AllowEmptyPassword):
+ // generate a response message to the challenge with the given Username/Password if password is provided
+ _, _, domainNeeded := ntlmssp.GetDomain(ntlmBindRequest.Username)
+ responseMessage, err = ntlmssp.ProcessChallenge(ntlmsspChallenge, ntlmBindRequest.Username, ntlmBindRequest.Password, domainNeeded)
+ default:
+ hash := ntlmBindRequest.Hash
+ if len(hash) == 0 {
+ hash = ntHash(ntlmBindRequest.Password)
+ }
+
+ responseMessage, err = ntlmBindRequest.Negotiator.ChallengeResponse(ntlmsspChallenge, ntlmBindRequest.Username, hash)
+ }
+
+ if err != nil {
+ return result, fmt.Errorf("process NTLM challenge: %s", err)
+ }
+
+ packet = ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ // append the challenge response message as a TagEmbeddedPDV BER value
+ auth := ber.Encode(ber.ClassContext, ber.TypePrimitive, ber.TagEmbeddedPDV, responseMessage, "authentication")
+
+ request.AppendChild(auth)
+ packet.AppendChild(request)
+ msgCtx, err = l.sendMessage(packet)
+ if err != nil {
+ return nil, fmt.Errorf("send message: %s", err)
+ }
+ defer l.finishMessage(msgCtx)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, fmt.Errorf("read packet: %s", err)
+ }
+
+ }
+
+ err = GetLDAPError(packet)
+ return result, err
+}
+
+func ntHash(pass string) string {
+ runes := utf16.Encode([]rune(pass))
+
+ b := bytes.Buffer{}
+ _ = binary.Write(&b, binary.LittleEndian, &runes)
+
+ hash := md4.New()
+ _, _ = hash.Write(b.Bytes())
+
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+// GSSAPIClient interface is used as the client-side implementation for the
+// GSSAPI SASL mechanism.
+// Interface inspired by GSSAPIClient from golang.org/x/crypto/ssh
+type GSSAPIClient interface {
+ // InitSecContext initiates the establishment of a security context for
+ // GSS-API between the client and server.
+ // Initially the token parameter should be specified as nil.
+ // The routine may return an outputToken which should be transferred to
+ // the server, where the server will present it to AcceptSecContext.
+ // If no token need be sent, InitSecContext will indicate this by setting
+ // needContinue to false. To complete the context
+ // establishment, one or more reply tokens may be required from the server;
+ // if so, InitSecContext will return a needContinue which is true.
+ // In this case, InitSecContext should be called again when the
+ // reply token is received from the server, passing the reply token
+ // to InitSecContext via the token parameters.
+ // See RFC 4752 section 3.1.
+ InitSecContext(target string, token []byte) (outputToken []byte, needContinue bool, err error)
+ // InitSecContextWithOptions is the same as InitSecContext but allows for additional options to be passed to the context establishment.
+ // See RFC 4752 section 3.1.
+ InitSecContextWithOptions(target string, token []byte, options []int) (outputToken []byte, needContinue bool, err error)
+ // NegotiateSaslAuth performs the last step of the Sasl handshake.
+ // It takes a token, which, when unwrapped, describes the servers supported
+ // security layers (first octet) and maximum receive buffer (remaining
+ // three octets).
+ // If the received token is unacceptable an error must be returned to abort
+ // the handshake.
+ // Outputs a signed token describing the client's selected security layer
+ // and receive buffer size and optionally an authorization identity.
+ // The returned token will be sent to the server and the handshake considered
+ // completed successfully and the server authenticated.
+ // See RFC 4752 section 3.1.
+ NegotiateSaslAuth(token []byte, authzid string) ([]byte, error)
+ // DeleteSecContext destroys any established secure context.
+ DeleteSecContext() error
+}
+
+// GSSAPIBindRequest represents a GSSAPI SASL mechanism bind request.
+// See rfc4752 and rfc4513 section 5.2.1.2.
+type GSSAPIBindRequest struct {
+ // Service Principal Name used for the service ticket, e.g. "ldap/<host>".
+ ServicePrincipalName string
+ // (Optional) Authorization entity
+ AuthZID string
+ // (Optional) Controls to send with the bind request
+ Controls []Control
+}
+
+// GSSAPIBind performs the GSSAPI SASL bind using the provided GSSAPI client.
+func (l *Conn) GSSAPIBind(client GSSAPIClient, servicePrincipal, authzid string) error {
+ return l.GSSAPIBindRequest(client, &GSSAPIBindRequest{
+ ServicePrincipalName: servicePrincipal,
+ AuthZID: authzid,
+ })
+}
+
+// GSSAPIBindRequest performs the GSSAPI SASL bind using the provided GSSAPI client.
+func (l *Conn) GSSAPIBindRequest(client GSSAPIClient, req *GSSAPIBindRequest) error {
+ return l.GSSAPIBindRequestWithAPOptions(client, req, []int{})
+}
+
+// GSSAPIBindRequestWithAPOptions performs the GSSAPI SASL bind using the provided GSSAPI client and the given AP options.
+func (l *Conn) GSSAPIBindRequestWithAPOptions(client GSSAPIClient, req *GSSAPIBindRequest, APOptions []int) error {
+ //nolint:errcheck
+ defer client.DeleteSecContext()
+
+ var err error
+ var reqToken []byte
+ var recvToken []byte
+ needInit := true
+ for {
+ if needInit {
+ // Establish secure context between client and server.
+ reqToken, needInit, err = client.InitSecContextWithOptions(req.ServicePrincipalName, recvToken, APOptions)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Secure context is set up, perform the last step of SASL handshake.
+ reqToken, err = client.NegotiateSaslAuth(recvToken, req.AuthZID)
+ if err != nil {
+ return err
+ }
+ }
+ // Send Bind request containing the current token and extract the
+ // token sent by server.
+ recvToken, err = l.saslBindTokenExchange(req.Controls, reqToken)
+ if err != nil {
+ return err
+ }
+
+ if !needInit && len(recvToken) == 0 {
+ break
+ }
+ }
+
+ return nil
+}
+
+func (l *Conn) saslBindTokenExchange(reqControls []Control, reqToken []byte) ([]byte, error) {
+ // Construct LDAP Bind request with GSSAPI SASL mechanism.
+ envelope := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ envelope.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "User Name"))
+
+ auth := ber.Encode(ber.ClassContext, ber.TypeConstructed, 3, "", "authentication")
+ auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "GSSAPI", "SASL Mech"))
+ if len(reqToken) > 0 {
+ auth.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(reqToken), "Credentials"))
+ }
+ request.AppendChild(auth)
+ envelope.AppendChild(request)
+ if len(reqControls) > 0 {
+ envelope.AppendChild(encodeControls(reqControls))
+ }
+
+ msgCtx, err := l.sendMessage(envelope)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if l.Debug {
+ if err = addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ // https://www.rfc-editor.org/rfc/rfc4511#section-4.1.1
+ // packet is an envelope
+ // child 0 is message id
+ // child 1 is protocolOp
+ if len(packet.Children) != 2 {
+ return nil, fmt.Errorf("bad bind response")
+ }
+
+ protocolOp := packet.Children[1]
+RESP:
+ switch protocolOp.Description {
+ case "Bind Response": // Bind Response
+ // Bind Response is an LDAP Response (https://www.rfc-editor.org/rfc/rfc4511#section-4.1.9)
+ // with an additional optional serverSaslCreds string (https://www.rfc-editor.org/rfc/rfc4511#section-4.2.2)
+ // child 0 is resultCode
+ resultCode := protocolOp.Children[0]
+ if resultCode.Tag != ber.TagEnumerated {
+ break RESP
+ }
+ switch resultCode.Value.(int64) {
+ case 14: // Sasl bind in progress
+ if len(protocolOp.Children) < 4 {
+ break RESP
+ }
+ referral := protocolOp.Children[3]
+ switch referral.Description {
+ case "Referral":
+ if referral.ClassType != ber.ClassContext || referral.Tag != ber.TagObjectDescriptor {
+ break RESP
+ }
+ return ioutil.ReadAll(referral.Data)
+ }
+ // Optional:
+ //if len(protocolOp.Children) == 4 {
+ // serverSaslCreds := protocolOp.Children[4]
+ //}
+ case 0: // Success - Bind OK.
+ // SASL layer in effect (if any) (See https://www.rfc-editor.org/rfc/rfc4513#section-5.2.1.4)
+ // NOTE: SASL security layers are not supported currently.
+ return nil, nil
+ }
+ }
+
+ return nil, GetLDAPError(packet)
+}
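A minimal sketch of the typical connect-and-bind flow built on the vendored APIs above (DialURL, Bind, Close); the URL, bind DN, and password are hypothetical placeholders.

package example

import ldap "github.com/go-ldap/ldap/v3"

// simpleBind dials the directory and performs a simple bind; all literals are
// placeholders. UnauthenticatedBind, ExternalBind, or the SASL binds above
// can be substituted as needed.
func simpleBind() (*ldap.Conn, error) {
	l, err := ldap.DialURL("ldaps://ldap.example.org:636")
	if err != nil {
		return nil, err
	}
	if err := l.Bind("cn=reader,dc=example,dc=org", "s3cr3t"); err != nil {
		l.Close()
		return nil, err
	}
	return l, nil
}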
diff --git a/vendor/github.com/go-ldap/ldap/v3/client.go b/vendor/github.com/go-ldap/ldap/v3/client.go
new file mode 100644
index 0000000..ee473fc
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/client.go
@@ -0,0 +1,42 @@
+package ldap
+
+import (
+ "context"
+ "crypto/tls"
+ "time"
+)
+
+// Client knows how to interact with an LDAP server
+type Client interface {
+ Start()
+ StartTLS(*tls.Config) error
+ Close() error
+ GetLastError() error
+ IsClosing() bool
+ SetTimeout(time.Duration)
+ TLSConnectionState() (tls.ConnectionState, bool)
+
+ Bind(username, password string) error
+ UnauthenticatedBind(username string) error
+ SimpleBind(*SimpleBindRequest) (*SimpleBindResult, error)
+ ExternalBind() error
+ NTLMUnauthenticatedBind(domain, username string) error
+ Unbind() error
+
+ Add(*AddRequest) error
+ Del(*DelRequest) error
+ Modify(*ModifyRequest) error
+ ModifyDN(*ModifyDNRequest) error
+ ModifyWithResult(*ModifyRequest) (*ModifyResult, error)
+ Extended(*ExtendedRequest) (*ExtendedResponse, error)
+
+ Compare(dn, attribute, value string) (bool, error)
+ PasswordModify(*PasswordModifyRequest) (*PasswordModifyResult, error)
+
+ Search(*SearchRequest) (*SearchResult, error)
+ SearchAsync(ctx context.Context, searchRequest *SearchRequest, bufferSize int) Response
+ SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
+ DirSync(searchRequest *SearchRequest, flags, maxAttrCount int64, cookie []byte) (*SearchResult, error)
+ DirSyncAsync(ctx context.Context, searchRequest *SearchRequest, bufferSize int, flags, maxAttrCount int64, cookie []byte) Response
+ Syncrepl(ctx context.Context, searchRequest *SearchRequest, bufferSize int, mode ControlSyncRequestMode, cookie []byte, reloadHint bool) Response
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/compare.go b/vendor/github.com/go-ldap/ldap/v3/compare.go
new file mode 100644
index 0000000..a1cd760
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/compare.go
@@ -0,0 +1,62 @@
+package ldap
+
+import (
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// CompareRequest represents an LDAP CompareRequest operation.
+type CompareRequest struct {
+ DN string
+ Attribute string
+ Value string
+}
+
+func (req *CompareRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
+
+ ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
+ ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Attribute, "AttributeDesc"))
+ ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.Value, "AssertionValue"))
+
+ pkt.AppendChild(ava)
+
+ envelope.AppendChild(pkt)
+
+ return nil
+}
+
+// Compare checks whether the given attribute of the entry at dn matches value. It returns true if it
+// does, otherwise false, along with any error that occurred.
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
+ msgCtx, err := l.doRequest(&CompareRequest{
+ DN: dn,
+ Attribute: attribute,
+ Value: value,
+ })
+ if err != nil {
+ return false, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return false, err
+ }
+
+ if packet.Children[1].Tag == ApplicationCompareResponse {
+ err := GetLDAPError(packet)
+
+ switch {
+ case IsErrorWithCode(err, LDAPResultCompareTrue):
+ return true, nil
+ case IsErrorWithCode(err, LDAPResultCompareFalse):
+ return false, nil
+ default:
+ return false, err
+ }
+ }
+ return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)
+}
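Compare maps the LDAP compare result onto a boolean, so a caller sketch (with placeholder DN, attribute, and value) might look like this.

package example

import ldap "github.com/go-ldap/ldap/v3"

// hasMail reports whether the entry's mail attribute matches the given value;
// all literals are placeholders. true/nil corresponds to compareTrue,
// false/nil to compareFalse, and any other LDAP result surfaces as an error.
func hasMail(l *ldap.Conn) (bool, error) {
	return l.Compare("cn=app-user,ou=people,dc=example,dc=org", "mail", "app-user@example.org")
}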
diff --git a/vendor/github.com/go-ldap/ldap/v3/conn.go b/vendor/github.com/go-ldap/ldap/v3/conn.go
new file mode 100644
index 0000000..05febbc
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/conn.go
@@ -0,0 +1,636 @@
+package ldap
+
+import (
+ "bufio"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+const (
+ // MessageQuit causes the processMessages loop to exit
+ MessageQuit = 0
+ // MessageRequest sends a request to the server
+ MessageRequest = 1
+ // MessageResponse receives a response from the server
+ MessageResponse = 2
+ // MessageFinish indicates the client considers a particular message ID to be finished
+ MessageFinish = 3
+ // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
+ MessageTimeout = 4
+)
+
+const (
+ // DefaultLdapPort default ldap port for pure TCP connection
+ DefaultLdapPort = "389"
+ // DefaultLdapsPort default ldap port for SSL connection
+ DefaultLdapsPort = "636"
+)
+
+// PacketResponse contains the packet or error encountered reading a response
+type PacketResponse struct {
+ // Packet is the packet read from the server
+ Packet *ber.Packet
+ // Error is an error encountered while reading
+ Error error
+}
+
+// ReadPacket returns the packet or an error
+func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
+ if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
+ }
+ return pr.Packet, pr.Error
+}
+
+type messageContext struct {
+ id int64
+ // close(done) should only be called from finishMessage()
+ done chan struct{}
+ // close(responses) should only be called from processMessages(), and only sent to from sendResponse()
+ responses chan *PacketResponse
+}
+
+// sendResponse should only be called within the processMessages() loop which
+// is also responsible for closing the responses channel.
+func (msgCtx *messageContext) sendResponse(packet *PacketResponse, timeout time.Duration) {
+ timeoutCtx := context.Background()
+ if timeout > 0 {
+ var cancelFunc context.CancelFunc
+ timeoutCtx, cancelFunc = context.WithTimeout(context.Background(), timeout)
+ defer cancelFunc()
+ }
+ select {
+ case msgCtx.responses <- packet:
+ // Successfully sent packet to message handler.
+ case <-msgCtx.done:
+ // The request handler is done and will not receive more
+ // packets.
+ case <-timeoutCtx.Done():
+ // The timeout was reached before the packet was sent.
+ }
+}
+
+type messagePacket struct {
+ Op int
+ MessageID int64
+ Packet *ber.Packet
+ Context *messageContext
+}
+
+type sendMessageFlags uint
+
+const (
+ startTLS sendMessageFlags = 1 << iota
+)
+
+// Conn represents an LDAP Connection
+type Conn struct {
+ // requestTimeout is loaded atomically
+ // so we need to ensure 64-bit alignment on 32-bit platforms.
+ // https://github.com/go-ldap/ldap/pull/199
+ requestTimeout int64
+ conn net.Conn
+ isTLS bool
+ closing uint32
+ closeErr atomic.Value
+ isStartingTLS bool
+ Debug debugging
+ chanConfirm chan struct{}
+ messageContexts map[int64]*messageContext
+ chanMessage chan *messagePacket
+ chanMessageID chan int64
+ wgClose sync.WaitGroup
+ outstandingRequests uint
+ messageMutex sync.Mutex
+
+ err error
+}
+
+var _ Client = &Conn{}
+
+// DefaultTimeout is a package-level variable that sets the timeout value
+// used for the Dial and DialTLS methods.
+//
+// WARNING: since this is a package-level variable, setting this value from
+// multiple places will probably result in undesired behaviour.
+var DefaultTimeout = 60 * time.Second
+
+// DialOpt configures DialContext.
+type DialOpt func(*DialContext)
+
+// DialWithDialer updates net.Dialer in DialContext.
+func DialWithDialer(d *net.Dialer) DialOpt {
+ return func(dc *DialContext) {
+ dc.dialer = d
+ }
+}
+
+// DialWithTLSConfig updates tls.Config in DialContext.
+func DialWithTLSConfig(tc *tls.Config) DialOpt {
+ return func(dc *DialContext) {
+ dc.tlsConfig = tc
+ }
+}
+
+// DialWithTLSDialer is a wrapper for DialWithTLSConfig with the option to
+// specify a net.Dialer, for example to define a timeout or a custom resolver.
+//
+// Deprecated: Use DialWithDialer and DialWithTLSConfig instead
+func DialWithTLSDialer(tlsConfig *tls.Config, dialer *net.Dialer) DialOpt {
+ return func(dc *DialContext) {
+ dc.tlsConfig = tlsConfig
+ dc.dialer = dialer
+ }
+}
+
+// DialContext contains necessary parameters to dial the given ldap URL.
+type DialContext struct {
+ dialer *net.Dialer
+ tlsConfig *tls.Config
+}
+
+func (dc *DialContext) dial(u *url.URL) (net.Conn, error) {
+ if u.Scheme == "ldapi" {
+ if u.Path == "" || u.Path == "/" {
+ u.Path = "/var/run/slapd/ldapi"
+ }
+ return dc.dialer.Dial("unix", u.Path)
+ }
+
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // we assume that error is due to missing port
+ host = u.Host
+ port = ""
+ }
+
+ switch u.Scheme {
+ case "cldap":
+ if port == "" {
+ port = DefaultLdapPort
+ }
+ return dc.dialer.Dial("udp", net.JoinHostPort(host, port))
+ case "ldap":
+ if port == "" {
+ port = DefaultLdapPort
+ }
+ return dc.dialer.Dial("tcp", net.JoinHostPort(host, port))
+ case "ldaps":
+ if port == "" {
+ port = DefaultLdapsPort
+ }
+ return tls.DialWithDialer(dc.dialer, "tcp", net.JoinHostPort(host, port), dc.tlsConfig)
+ }
+
+ return nil, fmt.Errorf("Unknown scheme '%s'", u.Scheme)
+}
+
+// Dial connects to the given address on the given network using net.Dial
+// and then returns a new Conn for the connection.
+//
+// Deprecated: Use DialURL instead.
+func Dial(network, addr string) (*Conn, error) {
+ c, err := net.DialTimeout(network, addr, DefaultTimeout)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, false)
+ conn.Start()
+ return conn, nil
+}
+
+// DialTLS connects to the given address on the given network using tls.Dial
+// and then returns a new Conn for the connection.
+//
+// Deprecated: Use DialURL instead.
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
+ c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, true)
+ conn.Start()
+ return conn, nil
+}
+
+// DialURL connects to the given ldap URL.
+// The following schemas are supported: ldap://, ldaps://, ldapi://,
+// and cldap:// (RFC1798, deprecated but used by Active Directory).
+// On success a new Conn for the connection is returned.
+func DialURL(addr string, opts ...DialOpt) (*Conn, error) {
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+
+ var dc DialContext
+ for _, opt := range opts {
+ opt(&dc)
+ }
+ if dc.dialer == nil {
+ dc.dialer = &net.Dialer{Timeout: DefaultTimeout}
+ }
+
+ c, err := dc.dial(u)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+
+ conn := NewConn(c, u.Scheme == "ldaps")
+ conn.Start()
+ return conn, nil
+}
+
+// NewConn returns a new Conn using conn for network I/O.
+func NewConn(conn net.Conn, isTLS bool) *Conn {
+ l := &Conn{
+ conn: conn,
+ chanConfirm: make(chan struct{}),
+ chanMessageID: make(chan int64),
+ chanMessage: make(chan *messagePacket, 10),
+ messageContexts: map[int64]*messageContext{},
+ requestTimeout: 0,
+ isTLS: isTLS,
+ }
+ l.wgClose.Add(1)
+ return l
+}
+
+// Start initialises goroutines to read replies and process messages.
+// Warning: Calling this function in addition to Dial or DialURL
+// may cause race conditions.
+//
+// See: https://github.com/go-ldap/ldap/issues/356
+func (l *Conn) Start() {
+ go l.reader()
+ go l.processMessages()
+}
+
+// IsClosing returns whether or not we're currently closing.
+func (l *Conn) IsClosing() bool {
+ return atomic.LoadUint32(&l.closing) == 1
+}
+
+// setClosing sets the closing value to true
+func (l *Conn) setClosing() bool {
+ return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
+}
+
+// Close closes the connection.
+func (l *Conn) Close() (err error) {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+
+ if l.setClosing() {
+ l.Debug.Printf("Sending quit message and waiting for confirmation")
+ l.chanMessage <- &messagePacket{Op: MessageQuit}
+
+ timeoutCtx := context.Background()
+ if l.getTimeout() > 0 {
+ var cancelFunc context.CancelFunc
+ timeoutCtx, cancelFunc = context.WithTimeout(timeoutCtx, time.Duration(l.getTimeout()))
+ defer cancelFunc()
+ }
+ select {
+ case <-l.chanConfirm:
+ // Confirmation was received.
+ case <-timeoutCtx.Done():
+ // The timeout was reached before confirmation was received.
+ }
+
+ close(l.chanMessage)
+
+ l.Debug.Printf("Closing network connection")
+ err = l.conn.Close()
+ l.wgClose.Done()
+ }
+ l.wgClose.Wait()
+
+ return err
+}
+
+// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
+func (l *Conn) SetTimeout(timeout time.Duration) {
+ atomic.StoreInt64(&l.requestTimeout, int64(timeout))
+}
+
+func (l *Conn) getTimeout() int64 {
+ return atomic.LoadInt64(&l.requestTimeout)
+}
+
+// Returns the next available messageID
+func (l *Conn) nextMessageID() int64 {
+ if messageID, ok := <-l.chanMessageID; ok {
+ return messageID
+ }
+ return 0
+}
+
+// GetLastError returns the last recorded error from goroutines like processMessages and reader.
+// Only the last recorded error will be returned.
+func (l *Conn) GetLastError() error {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+ return l.err
+}
+
+// StartTLS sends the command to start a TLS session and then creates a new TLS Client
+func (l *Conn) StartTLS(config *tls.Config) error {
+ if l.isTLS {
+ return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
+ packet.AppendChild(request)
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ l.Close()
+ return err
+ }
+ l.Debug.PrintPacket(packet)
+ }
+
+ if err := GetLDAPError(packet); err == nil {
+ conn := tls.Client(l.conn, config)
+
+ if connErr := conn.Handshake(); connErr != nil {
+ l.Close()
+ return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr))
+ }
+
+ l.isTLS = true
+ l.conn = conn
+ } else {
+ return err
+ }
+ go l.reader()
+
+ return nil
+}
+
+// TLSConnectionState returns the client's TLS connection state.
+// The return values are their zero values if StartTLS did
+// not succeed.
+func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
+ tc, ok := l.conn.(*tls.Conn)
+ if !ok {
+ return
+ }
+ return tc.ConnectionState(), true
+}
+
+func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
+ return l.sendMessageWithFlags(packet, 0)
+}
+
+func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
+ if l.IsClosing() {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+ }
+ l.messageMutex.Lock()
+ l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
+ if l.isStartingTLS {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
+ }
+ if flags&startTLS != 0 {
+ if l.outstandingRequests != 0 {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
+ }
+ l.isStartingTLS = true
+ }
+ l.outstandingRequests++
+
+ l.messageMutex.Unlock()
+
+ responses := make(chan *PacketResponse)
+ messageID := packet.Children[0].Value.(int64)
+ message := &messagePacket{
+ Op: MessageRequest,
+ MessageID: messageID,
+ Packet: packet,
+ Context: &messageContext{
+ id: messageID,
+ done: make(chan struct{}),
+ responses: responses,
+ },
+ }
+ if !l.sendProcessMessage(message) {
+ if l.IsClosing() {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+ }
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not send message for unknown reason"))
+ }
+ return message.Context, nil
+}
+
+func (l *Conn) finishMessage(msgCtx *messageContext) {
+ close(msgCtx.done)
+
+ if l.IsClosing() {
+ return
+ }
+
+ l.messageMutex.Lock()
+ l.outstandingRequests--
+ if l.isStartingTLS {
+ l.isStartingTLS = false
+ }
+ l.messageMutex.Unlock()
+
+ message := &messagePacket{
+ Op: MessageFinish,
+ MessageID: msgCtx.id,
+ }
+ l.sendProcessMessage(message)
+}
+
+func (l *Conn) sendProcessMessage(message *messagePacket) bool {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+ if l.IsClosing() {
+ return false
+ }
+ l.chanMessage <- message
+ return true
+}
+
+func (l *Conn) processMessages() {
+ defer func() {
+ if err := recover(); err != nil {
+ l.err = fmt.Errorf("ldap: recovered panic in processMessages: %v", err)
+ }
+ for messageID, msgCtx := range l.messageContexts {
+ // If we are closing due to an error, inform anyone who
+ // is waiting about the error.
+ if l.IsClosing() && l.closeErr.Load() != nil {
+ msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}, time.Duration(l.getTimeout()))
+ }
+ l.Debug.Printf("Closing channel for MessageID %d", messageID)
+ close(msgCtx.responses)
+ delete(l.messageContexts, messageID)
+ }
+ close(l.chanMessageID)
+ close(l.chanConfirm)
+ }()
+
+ var messageID int64 = 1
+ for {
+ select {
+ case l.chanMessageID <- messageID:
+ messageID++
+ case message := <-l.chanMessage:
+ switch message.Op {
+ case MessageQuit:
+ l.Debug.Printf("Shutting down - quit message received")
+ return
+ case MessageRequest:
+ // Add to message list and write to network
+ l.Debug.Printf("Sending message %d", message.MessageID)
+
+ buf := message.Packet.Bytes()
+ _, err := l.conn.Write(buf)
+ if err != nil {
+ l.Debug.Printf("Error Sending Message: %s", err.Error())
+ message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}, time.Duration(l.getTimeout()))
+ close(message.Context.responses)
+ break
+ }
+
+ // Only add to messageContexts if we were able to
+ // successfully write the message.
+ l.messageContexts[message.MessageID] = message.Context
+
+ // Add timeout if defined
+ requestTimeout := l.getTimeout()
+ if requestTimeout > 0 {
+ go func() {
+ timer := time.NewTimer(time.Duration(requestTimeout))
+ defer func() {
+ if err := recover(); err != nil {
+ l.err = fmt.Errorf("ldap: recovered panic in RequestTimeout: %v", err)
+ }
+
+ timer.Stop()
+ }()
+
+ select {
+ case <-timer.C:
+ timeoutMessage := &messagePacket{
+ Op: MessageTimeout,
+ MessageID: message.MessageID,
+ }
+ l.sendProcessMessage(timeoutMessage)
+ case <-message.Context.done:
+ }
+ }()
+ }
+ case MessageResponse:
+ l.Debug.Printf("Receiving message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ msgCtx.sendResponse(&PacketResponse{message.Packet, nil}, time.Duration(l.getTimeout()))
+ } else {
+ l.err = fmt.Errorf("ldap: received unexpected message %d, %v", message.MessageID, l.IsClosing())
+ l.Debug.PrintPacket(message.Packet)
+ }
+ case MessageTimeout:
+ // Handle the timeout by closing the channel
+ // All reads will return immediately
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
+ msgCtx.sendResponse(&PacketResponse{message.Packet, NewError(ErrorNetwork, errors.New("ldap: connection timed out"))}, time.Duration(l.getTimeout()))
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ case MessageFinish:
+ l.Debug.Printf("Finished message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ }
+ }
+ }
+}
+
+func (l *Conn) reader() {
+ cleanstop := false
+ defer func() {
+ if err := recover(); err != nil {
+ l.err = fmt.Errorf("ldap: recovered panic in reader: %v", err)
+ }
+ if !cleanstop {
+ l.Close()
+ }
+ }()
+
+ bufConn := bufio.NewReader(l.conn)
+ for {
+ if cleanstop {
+ l.Debug.Printf("reader clean stopping (without closing the connection)")
+ return
+ }
+ packet, err := ber.ReadPacket(bufConn)
+ if err != nil {
+ // A read error is expected here if we are closing the connection...
+ if !l.IsClosing() {
+ l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
+ l.Debug.Printf("reader error: %s", err)
+ }
+ return
+ }
+ if err := addLDAPDescriptions(packet); err != nil {
+ l.Debug.Printf("descriptions error: %s", err)
+ }
+ if len(packet.Children) == 0 {
+ l.Debug.Printf("Received bad ldap packet")
+ continue
+ }
+ l.messageMutex.Lock()
+ if l.isStartingTLS {
+ cleanstop = true
+ }
+ l.messageMutex.Unlock()
+ message := &messagePacket{
+ Op: MessageResponse,
+ MessageID: packet.Children[0].Value.(int64),
+ Packet: packet,
+ }
+ if !l.sendProcessMessage(message) {
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/control.go b/vendor/github.com/go-ldap/ldap/v3/control.go
new file mode 100644
index 0000000..f1c2746
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/control.go
@@ -0,0 +1,1297 @@
+package ldap
+
+import (
+ "fmt"
+ "strconv"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+ "github.com/google/uuid"
+)
+
+const (
+ // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
+ ControlTypePaging = "1.2.840.113556.1.4.319"
+ // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+ ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
+ // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
+ // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
+ // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
+ ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
+ // ControlTypeWhoAmI - https://tools.ietf.org/html/rfc4532
+ ControlTypeWhoAmI = "1.3.6.1.4.1.4203.1.11.3"
+ // ControlTypeSubtreeDelete - https://datatracker.ietf.org/doc/html/draft-armijo-ldap-treedelete-02
+ ControlTypeSubtreeDelete = "1.2.840.113556.1.4.805"
+
+ // ControlTypeServerSideSorting - https://www.ietf.org/rfc/rfc2891.txt
+ ControlTypeServerSideSorting = "1.2.840.113556.1.4.473"
+ // ControlTypeServerSideSortingResult - https://www.ietf.org/rfc/rfc2891.txt
+ ControlTypeServerSideSortingResult = "1.2.840.113556.1.4.474"
+
+ // ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
+ ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528"
+ // ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
+ ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417"
+ // ControlTypeMicrosoftServerLinkTTL - https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-adts/f4f523a8-abc0-4b3a-a471-6b2fef135481?redirectedfrom=MSDN
+ ControlTypeMicrosoftServerLinkTTL = "1.2.840.113556.1.4.2309"
+ // ControlTypeDirSync - Active Directory DirSync - https://msdn.microsoft.com/en-us/library/aa366978(v=vs.85).aspx
+ ControlTypeDirSync = "1.2.840.113556.1.4.841"
+
+ // ControlTypeSyncRequest - https://www.ietf.org/rfc/rfc4533.txt
+ ControlTypeSyncRequest = "1.3.6.1.4.1.4203.1.9.1.1"
+ // ControlTypeSyncState - https://www.ietf.org/rfc/rfc4533.txt
+ ControlTypeSyncState = "1.3.6.1.4.1.4203.1.9.1.2"
+ // ControlTypeSyncDone - https://www.ietf.org/rfc/rfc4533.txt
+ ControlTypeSyncDone = "1.3.6.1.4.1.4203.1.9.1.3"
+ // ControlTypeSyncInfo - https://www.ietf.org/rfc/rfc4533.txt
+ ControlTypeSyncInfo = "1.3.6.1.4.1.4203.1.9.1.4"
+)
+
+// Flags for DirSync control
+const (
+ DirSyncIncrementalValues int64 = 2147483648
+ DirSyncPublicDataOnly int64 = 8192
+ DirSyncAncestorsFirstOrder int64 = 2048
+ DirSyncObjectSecurity int64 = 1
+)
+
+// ControlTypeMap maps controls to text descriptions
+var ControlTypeMap = map[string]string{
+ ControlTypePaging: "Paging",
+ ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
+ ControlTypeManageDsaIT: "Manage DSA IT",
+ ControlTypeSubtreeDelete: "Subtree Delete Control",
+ ControlTypeMicrosoftNotification: "Change Notification - Microsoft",
+ ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft",
+ ControlTypeMicrosoftServerLinkTTL: "Return TTL-DNs for link values with associated expiry times - Microsoft",
+ ControlTypeServerSideSorting: "Server Side Sorting Request - LDAP Control Extension for Server Side Sorting of Search Results (RFC2891)",
+ ControlTypeServerSideSortingResult: "Server Side Sorting Results - LDAP Control Extension for Server Side Sorting of Search Results (RFC2891)",
+ ControlTypeDirSync: "DirSync",
+ ControlTypeSyncRequest: "Sync Request",
+ ControlTypeSyncState: "Sync State",
+ ControlTypeSyncDone: "Sync Done",
+ ControlTypeSyncInfo: "Sync Info",
+}
+
+// Control defines an interface controls provide to encode and describe themselves
+type Control interface {
+ // GetControlType returns the OID
+ GetControlType() string
+ // Encode returns the ber packet representation
+ Encode() *ber.Packet
+ // String returns a human-readable description
+ String() string
+}
+
+// ControlString implements the Control interface for simple controls
+type ControlString struct {
+ ControlType string
+ Criticality bool
+ ControlValue string
+}
+
+// GetControlType returns the OID
+func (c *ControlString) GetControlType() string {
+ return c.ControlType
+}
+
+// Encode returns the ber packet representation
+func (c *ControlString) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ if c.ControlValue != "" {
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
+ }
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlString) String() string {
+ return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
+}
+
+// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
+type ControlPaging struct {
+ // PagingSize indicates the page size
+ PagingSize uint32
+ // Cookie is an opaque value returned by the server to track a paging cursor
+ Cookie []byte
+}
+
+// GetControlType returns the OID
+func (c *ControlPaging) GetControlType() string {
+ return ControlTypePaging
+}
+
+// Encode returns the ber packet representation
+func (c *ControlPaging) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
+
+ p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
+ seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
+ cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
+ cookie.Value = c.Cookie
+ cookie.Data.Write(c.Cookie)
+ seq.AppendChild(cookie)
+ p2.AppendChild(seq)
+
+ packet.AppendChild(p2)
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlPaging) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
+ ControlTypeMap[ControlTypePaging],
+ ControlTypePaging,
+ false,
+ c.PagingSize,
+ c.Cookie)
+}
+
+// SetCookie stores the given cookie in the paging control
+func (c *ControlPaging) SetCookie(cookie []byte) {
+ c.Cookie = cookie
+}
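As a usage note, the manual paging loop that ControlPaging enables looks roughly like the sketch below; Conn.SearchWithPaging wraps the same pattern. The base DN, filter, and page size are placeholders.

package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

// pageAll walks a result set 100 entries at a time using the paging control.
func pageAll(conn *ldap.Conn) error {
	paging := ldap.NewControlPaging(100)
	for {
		req := ldap.NewSearchRequest(
			"dc=example,dc=com", ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
			0, 0, false, "(objectClass=person)", []string{"cn"},
			[]ldap.Control{paging},
		)
		res, err := conn.Search(req)
		if err != nil {
			return err
		}
		for _, entry := range res.Entries {
			log.Println(entry.DN)
		}
		// The response carries the server's paging control with the next cookie.
		ctrl := ldap.FindControl(res.Controls, ldap.ControlTypePaging)
		if ctrl == nil {
			return nil
		}
		cookie := ctrl.(*ldap.ControlPaging).Cookie
		if len(cookie) == 0 {
			return nil // an empty cookie means the last page has been delivered
		}
		paging.SetCookie(cookie)
	}
}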
+
+// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+type ControlBeheraPasswordPolicy struct {
+ // Expire contains the number of seconds before a password will expire
+ Expire int64
+ // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
+ Grace int64
+ // Error indicates the error code
+ Error int8
+ // ErrorString is a human readable error
+ ErrorString string
+}
+
+// GetControlType returns the OID
+func (c *ControlBeheraPasswordPolicy) GetControlType() string {
+ return ControlTypeBeheraPasswordPolicy
+}
+
+// Encode returns the ber packet representation
+func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlBeheraPasswordPolicy) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
+ ControlTypeMap[ControlTypeBeheraPasswordPolicy],
+ ControlTypeBeheraPasswordPolicy,
+ false,
+ c.Expire,
+ c.Grace,
+ c.Error,
+ c.ErrorString)
+}
+
+// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordMustChange struct {
+ // MustChange indicates if the password is required to be changed
+ MustChange bool
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordMustChange) GetControlType() string {
+ return ControlTypeVChuPasswordMustChange
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordMustChange) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t MustChange: %v",
+ ControlTypeMap[ControlTypeVChuPasswordMustChange],
+ ControlTypeVChuPasswordMustChange,
+ false,
+ c.MustChange)
+}
+
+// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordWarning struct {
+ // Expire indicates the time in seconds until the password expires
+ Expire int64
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordWarning) GetControlType() string {
+ return ControlTypeVChuPasswordWarning
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordWarning) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %b",
+ ControlTypeMap[ControlTypeVChuPasswordWarning],
+ ControlTypeVChuPasswordWarning,
+ false,
+ c.Expire)
+}
+
+// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
+type ControlManageDsaIT struct {
+ // Criticality indicates if this control is required
+ Criticality bool
+}
+
+// GetControlType returns the OID
+func (c *ControlManageDsaIT) GetControlType() string {
+ return ControlTypeManageDsaIT
+}
+
+// Encode returns the ber packet representation
+func (c *ControlManageDsaIT) Encode() *ber.Packet {
+ // FIXME
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlManageDsaIT) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t",
+ ControlTypeMap[ControlTypeManageDsaIT],
+ ControlTypeManageDsaIT,
+ c.Criticality)
+}
+
+// NewControlManageDsaIT returns a ControlManageDsaIT control
+func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
+ return &ControlManageDsaIT{Criticality: Criticality}
+}
+
+// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx
+type ControlMicrosoftNotification struct{}
+
+// GetControlType returns the OID
+func (c *ControlMicrosoftNotification) GetControlType() string {
+ return ControlTypeMicrosoftNotification
+}
+
+// Encode returns the ber packet representation
+func (c *ControlMicrosoftNotification) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlMicrosoftNotification) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q)",
+ ControlTypeMap[ControlTypeMicrosoftNotification],
+ ControlTypeMicrosoftNotification)
+}
+
+// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control
+func NewControlMicrosoftNotification() *ControlMicrosoftNotification {
+ return &ControlMicrosoftNotification{}
+}
+
+// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx
+type ControlMicrosoftShowDeleted struct{}
+
+// GetControlType returns the OID
+func (c *ControlMicrosoftShowDeleted) GetControlType() string {
+ return ControlTypeMicrosoftShowDeleted
+}
+
+// Encode returns the ber packet representation
+func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlMicrosoftShowDeleted) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q)",
+ ControlTypeMap[ControlTypeMicrosoftShowDeleted],
+ ControlTypeMicrosoftShowDeleted)
+}
+
+// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control
+func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted {
+ return &ControlMicrosoftShowDeleted{}
+}
+
+// ControlMicrosoftServerLinkTTL implements the control described in https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-adts/f4f523a8-abc0-4b3a-a471-6b2fef135481?redirectedfrom=MSDN
+type ControlMicrosoftServerLinkTTL struct{}
+
+// GetControlType returns the OID
+func (c *ControlMicrosoftServerLinkTTL) GetControlType() string {
+ return ControlTypeMicrosoftServerLinkTTL
+}
+
+// Encode returns the ber packet representation
+func (c *ControlMicrosoftServerLinkTTL) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftServerLinkTTL, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftServerLinkTTL]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlMicrosoftServerLinkTTL) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q)",
+ ControlTypeMap[ControlTypeMicrosoftServerLinkTTL],
+ ControlTypeMicrosoftServerLinkTTL)
+}
+
+// NewControlMicrosoftServerLinkTTL returns a ControlMicrosoftServerLinkTTL control
+func NewControlMicrosoftServerLinkTTL() *ControlMicrosoftServerLinkTTL {
+ return &ControlMicrosoftServerLinkTTL{}
+}
+
+// FindControl returns the first control of the given type in the list, or nil
+func FindControl(controls []Control, controlType string) Control {
+ for _, c := range controls {
+ if c.GetControlType() == controlType {
+ return c
+ }
+ }
+ return nil
+}
+
+// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
+func DecodeControl(packet *ber.Packet) (Control, error) {
+ var (
+ ControlType = ""
+ Criticality = false
+ value *ber.Packet
+ )
+
+ switch len(packet.Children) {
+ case 0:
+ // at least one child is required for control type
+ return nil, fmt.Errorf("at least one child is required for control type")
+
+ case 1:
+ // just type, no criticality or value
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ ControlType = packet.Children[0].Value.(string)
+
+ case 2:
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ if packet.Children[0].Value != nil {
+ ControlType = packet.Children[0].Value.(string)
+ } else if packet.Children[0].Data != nil {
+ ControlType = packet.Children[0].Data.String()
+ } else {
+ return nil, fmt.Errorf("not found where to get the control type")
+ }
+
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := packet.Children[1].Value.(bool); ok {
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+ } else {
+ packet.Children[1].Description = "Control Value"
+ value = packet.Children[1]
+ }
+
+ case 3:
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ ControlType = packet.Children[0].Value.(string)
+
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+
+ packet.Children[2].Description = "Control Value"
+ value = packet.Children[2]
+
+ default:
+ // more than 3 children is invalid
+ return nil, fmt.Errorf("more than 3 children is invalid for controls")
+ }
+
+ switch ControlType {
+ case ControlTypeManageDsaIT:
+ return NewControlManageDsaIT(Criticality), nil
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ c := new(ControlPaging)
+ if value.Value != nil {
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ value = value.Children[0]
+ value.Description = "Search Control Value"
+ value.Children[0].Description = "Paging Size"
+ value.Children[1].Description = "Cookie"
+ c.PagingSize = uint32(value.Children[0].Value.(int64))
+ c.Cookie = value.Children[1].Data.Bytes()
+ value.Children[1].Value = c.Cookie
+ return c, nil
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera)"
+ c := NewControlBeheraPasswordPolicy()
+ if value.Value != nil {
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+
+ sequence := value.Children[0]
+
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ // Warning
+ warningPacket := child.Children[0]
+ val, err := ber.ParseInt64(warningPacket.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ if warningPacket.Tag == 0 {
+ // timeBeforeExpiration
+ c.Expire = val
+ warningPacket.Value = c.Expire
+ } else if warningPacket.Tag == 1 {
+ // graceAuthNsRemaining
+ c.Grace = val
+ warningPacket.Value = c.Grace
+ }
+ } else if child.Tag == 1 {
+ // Error
+ bs := child.Data.Bytes()
+ if len(bs) != 1 || bs[0] > 8 {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", "invalid PasswordPolicyResponse enum value")
+ }
+ val := int8(bs[0])
+ c.Error = val
+ child.Value = c.Error
+ c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
+ }
+ }
+ return c, nil
+ case ControlTypeVChuPasswordMustChange:
+ c := &ControlVChuPasswordMustChange{MustChange: true}
+ return c, nil
+ case ControlTypeVChuPasswordWarning:
+ c := &ControlVChuPasswordWarning{Expire: -1}
+ expireStr := ber.DecodeString(value.Data.Bytes())
+
+ expire, err := strconv.ParseInt(expireStr, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse value as int: %s", err)
+ }
+ c.Expire = expire
+ value.Value = c.Expire
+
+ return c, nil
+ case ControlTypeMicrosoftNotification:
+ return NewControlMicrosoftNotification(), nil
+ case ControlTypeMicrosoftShowDeleted:
+ return NewControlMicrosoftShowDeleted(), nil
+ case ControlTypeMicrosoftServerLinkTTL:
+ return NewControlMicrosoftServerLinkTTL(), nil
+ case ControlTypeSubtreeDelete:
+ return NewControlSubtreeDelete(), nil
+ case ControlTypeServerSideSorting:
+ return NewControlServerSideSorting(value)
+ case ControlTypeServerSideSortingResult:
+ return NewControlServerSideSortingResult(value)
+ case ControlTypeDirSync:
+ value.Description += " (DirSync)"
+ return NewResponseControlDirSync(value)
+ case ControlTypeSyncState:
+ value.Description += " (Sync State)"
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ return NewControlSyncState(valueChildren)
+ case ControlTypeSyncDone:
+ value.Description += " (Sync Done)"
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ return NewControlSyncDone(valueChildren)
+ case ControlTypeSyncInfo:
+ value.Description += " (Sync Info)"
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ return NewControlSyncInfo(valueChildren)
+ default:
+ c := new(ControlString)
+ c.ControlType = ControlType
+ c.Criticality = Criticality
+ if value != nil {
+ c.ControlValue = value.Value.(string)
+ }
+ return c, nil
+ }
+}
+
+// NewControlString returns a generic control
+func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
+ return &ControlString{
+ ControlType: controlType,
+ Criticality: criticality,
+ ControlValue: controlValue,
+ }
+}
+
+// NewControlPaging returns a paging control
+func NewControlPaging(pagingSize uint32) *ControlPaging {
+ return &ControlPaging{PagingSize: pagingSize}
+}
+
+// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
+func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
+ return &ControlBeheraPasswordPolicy{
+ Expire: -1,
+ Grace: -1,
+ Error: -1,
+ }
+}
+
+// ControlSubtreeDelete implements the subtree delete control described in
+// https://datatracker.ietf.org/doc/html/draft-armijo-ldap-treedelete-02
+type ControlSubtreeDelete struct{}
+
+// GetControlType returns the OID
+func (c *ControlSubtreeDelete) GetControlType() string {
+ return ControlTypeSubtreeDelete
+}
+
+// NewControlSubtreeDelete returns a ControlSubtreeDelete control.
+func NewControlSubtreeDelete() *ControlSubtreeDelete {
+ return &ControlSubtreeDelete{}
+}
+
+// Encode returns the ber packet representation
+func (c *ControlSubtreeDelete) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeSubtreeDelete, "Control Type ("+ControlTypeMap[ControlTypeSubtreeDelete]+")"))
+
+ return packet
+}
+
+func (c *ControlSubtreeDelete) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q)",
+ ControlTypeMap[ControlTypeSubtreeDelete],
+ ControlTypeSubtreeDelete)
+}
+
+func encodeControls(controls []Control) *ber.Packet {
+ packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
+ for _, control := range controls {
+ packet.AppendChild(control.Encode())
+ }
+ return packet
+}
+
+// ControlDirSync implements the control described in https://msdn.microsoft.com/en-us/library/aa366978(v=vs.85).aspx
+type ControlDirSync struct {
+ Criticality bool
+ Flags int64
+ MaxAttrCount int64
+ Cookie []byte
+}
+
+// Deprecated: Use NewRequestControlDirSync instead
+func NewControlDirSync(flags int64, maxAttrCount int64, cookie []byte) *ControlDirSync {
+ return NewRequestControlDirSync(flags, maxAttrCount, cookie)
+}
+
+// NewRequestControlDirSync returns a dir sync control
+func NewRequestControlDirSync(
+ flags int64, maxAttrCount int64, cookie []byte,
+) *ControlDirSync {
+ return &ControlDirSync{
+ Criticality: true,
+ Flags: flags,
+ MaxAttrCount: maxAttrCount,
+ Cookie: cookie,
+ }
+}
+
+// NewResponseControlDirSync returns a dir sync control
+func NewResponseControlDirSync(value *ber.Packet) (*ControlDirSync, error) {
+ if value.Value != nil {
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ child := value.Children[0]
+ if len(child.Children) != 3 { // also on initial creation, Cookie is an empty string
+ return nil, fmt.Errorf("invalid number of children in dirSync control")
+ }
+ child.Description = "DirSync Control Value"
+ child.Children[0].Description = "Flags"
+ child.Children[1].Description = "MaxAttrCount"
+ child.Children[2].Description = "Cookie"
+
+ cookie := child.Children[2].Data.Bytes()
+ child.Children[2].Value = cookie
+ return &ControlDirSync{
+ Criticality: true,
+ Flags: child.Children[0].Value.(int64),
+ MaxAttrCount: child.Children[1].Value.(int64),
+ Cookie: cookie,
+ }, nil
+}
+
+// GetControlType returns the OID
+func (c *ControlDirSync) GetControlType() string {
+ return ControlTypeDirSync
+}
+
+// String returns a human-readable description
+func (c *ControlDirSync) String() string {
+ return fmt.Sprintf(
+ "ControlType: %s (%q) Criticality: %t ControlValue: Flags: %d MaxAttrCount: %d",
+ ControlTypeMap[ControlTypeDirSync],
+ ControlTypeDirSync,
+ c.Criticality,
+ c.Flags,
+ c.MaxAttrCount,
+ )
+}
+
+// Encode returns the ber packet representation
+func (c *ControlDirSync) Encode() *ber.Packet {
+ cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "", "Cookie")
+ if len(c.Cookie) != 0 {
+ cookie.Value = c.Cookie
+ cookie.Data.Write(c.Cookie)
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeDirSync, "Control Type ("+ControlTypeMap[ControlTypeDirSync]+")"))
+ packet.AppendChild(ber.NewLDAPBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) // must be true always
+
+ val := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (DirSync)")
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "DirSync Control Value")
+ seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.Flags), "Flags"))
+ seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.MaxAttrCount), "MaxAttrCount"))
+ seq.AppendChild(cookie)
+ val.AppendChild(seq)
+
+ packet.AppendChild(val)
+ return packet
+}
+
+// SetCookie stores the given cookie in the dirSync control
+func (c *ControlDirSync) SetCookie(cookie []byte) {
+ c.Cookie = cookie
+}
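As a usage note, a hedged sketch of an Active Directory DirSync poll built on this control follows; the flag, attribute budget, base DN, and filter are placeholders, and the search normally has to target a naming context root with sufficient privileges.

package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

// dirSyncOnce performs one DirSync pass and returns the cookie for the next poll.
func dirSyncOnce(conn *ldap.Conn, cookie []byte) ([]byte, error) {
	ctrl := ldap.NewRequestControlDirSync(ldap.DirSyncObjectSecurity, 1000, cookie)
	req := ldap.NewSearchRequest(
		"dc=example,dc=com", ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
		0, 0, false, "(objectClass=user)", nil,
		[]ldap.Control{ctrl},
	)
	res, err := conn.Search(req)
	if err != nil {
		return nil, err
	}
	for _, entry := range res.Entries {
		log.Println("changed:", entry.DN)
	}
	// The response control carries the cookie to pass on the next incremental call.
	if c := ldap.FindControl(res.Controls, ldap.ControlTypeDirSync); c != nil {
		return c.(*ldap.ControlDirSync).Cookie, nil
	}
	return nil, nil
}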
+
+// ControlServerSideSorting
+
+type SortKey struct {
+ Reverse bool
+ AttributeType string
+ MatchingRule string
+}
+
+type ControlServerSideSorting struct {
+ SortKeys []*SortKey
+}
+
+func (c *ControlServerSideSorting) GetControlType() string {
+ return ControlTypeServerSideSorting
+}
+
+func NewControlServerSideSorting(value *ber.Packet) (*ControlServerSideSorting, error) {
+ sortKeys := []*SortKey{}
+
+ val := value.Children[1].Children
+
+ if len(val) != 1 {
+ return nil, fmt.Errorf("no sequence value in packet")
+ }
+
+ sequences := val[0].Children
+
+ for i, sequence := range sequences {
+ sortKey := new(SortKey)
+
+ if len(sequence.Children) < 2 {
+ return nil, fmt.Errorf("attributeType or matchingRule is missing from sequence %d", i)
+ }
+
+ sortKey.AttributeType = sequence.Children[0].Value.(string)
+ sortKey.MatchingRule = sequence.Children[1].Value.(string)
+
+ if len(sequence.Children) == 3 {
+ sortKey.Reverse = sequence.Children[2].Value.(bool)
+ }
+
+ sortKeys = append(sortKeys, sortKey)
+ }
+
+ return &ControlServerSideSorting{SortKeys: sortKeys}, nil
+}
+
+func NewControlServerSideSortingWithSortKeys(sortKeys []*SortKey) *ControlServerSideSorting {
+ return &ControlServerSideSorting{SortKeys: sortKeys}
+}
+
+func (c *ControlServerSideSorting) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ control := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.GetControlType(), "Control Type")
+
+ value := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value")
+ seqs := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "SortKeyList")
+
+ for _, f := range c.SortKeys {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "")
+
+ seq.AppendChild(
+ ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, f.AttributeType, "attributeType"),
+ )
+ seq.AppendChild(
+ ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, f.MatchingRule, "orderingRule"),
+ )
+ if f.Reverse {
+ seq.AppendChild(
+ ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, 1, f.Reverse, "reverseOrder"),
+ )
+ }
+
+ seqs.AppendChild(seq)
+ }
+
+ value.AppendChild(seqs)
+
+ packet.AppendChild(control)
+ packet.AppendChild(value)
+
+ return packet
+}
+
+func (c *ControlServerSideSorting) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality:%t %+v",
+ "Server Side Sorting",
+ c.GetControlType(),
+ false,
+ c.SortKeys,
+ )
+}
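For illustration, a hedged sketch of requesting server-side sorting on cn; whether the ordering is honoured depends on the server's RFC 2891 support, and the base DN and filter are placeholders.

package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

// sortedSearch asks the server to return entries ordered by cn, descending.
func sortedSearch(conn *ldap.Conn) error {
	sortCtrl := ldap.NewControlServerSideSortingWithSortKeys([]*ldap.SortKey{
		{AttributeType: "cn", Reverse: true},
	})
	req := ldap.NewSearchRequest(
		"dc=example,dc=com", ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
		0, 0, false, "(objectClass=person)", []string{"cn"},
		[]ldap.Control{sortCtrl},
	)
	res, err := conn.Search(req)
	if err != nil {
		return err
	}
	for _, entry := range res.Entries {
		log.Println(entry.GetAttributeValue("cn"))
	}
	return nil
}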
+
+// ControlServerSideSortingResponse
+
+const (
+ ControlServerSideSortingCodeSuccess ControlServerSideSortingCode = 0
+ ControlServerSideSortingCodeOperationsError ControlServerSideSortingCode = 1
+ ControlServerSideSortingCodeTimeLimitExceeded ControlServerSideSortingCode = 2
+ ControlServerSideSortingCodeStrongAuthRequired ControlServerSideSortingCode = 8
+ ControlServerSideSortingCodeAdminLimitExceeded ControlServerSideSortingCode = 11
+ ControlServerSideSortingCodeNoSuchAttribute ControlServerSideSortingCode = 16
+ ControlServerSideSortingCodeInappropriateMatching ControlServerSideSortingCode = 18
+ ControlServerSideSortingCodeInsufficientAccessRights ControlServerSideSortingCode = 50
+ ControlServerSideSortingCodeBusy ControlServerSideSortingCode = 51
+ ControlServerSideSortingCodeUnwillingToPerform ControlServerSideSortingCode = 53
+ ControlServerSideSortingCodeOther ControlServerSideSortingCode = 80
+)
+
+var ControlServerSideSortingCodes = []ControlServerSideSortingCode{
+ ControlServerSideSortingCodeSuccess,
+ ControlServerSideSortingCodeOperationsError,
+ ControlServerSideSortingCodeTimeLimitExceeded,
+ ControlServerSideSortingCodeStrongAuthRequired,
+ ControlServerSideSortingCodeAdminLimitExceeded,
+ ControlServerSideSortingCodeNoSuchAttribute,
+ ControlServerSideSortingCodeInappropriateMatching,
+ ControlServerSideSortingCodeInsufficientAccessRights,
+ ControlServerSideSortingCodeBusy,
+ ControlServerSideSortingCodeUnwillingToPerform,
+ ControlServerSideSortingCodeOther,
+}
+
+type ControlServerSideSortingCode int64
+
+// Valid tests the code contained in the control against the ControlServerSideSortingCodes slice and returns an error if the code is unknown.
+func (c ControlServerSideSortingCode) Valid() error {
+ for _, validRet := range ControlServerSideSortingCodes {
+ if c == validRet {
+ return nil
+ }
+ }
+ return fmt.Errorf("unknown return code : %d", c)
+}
+
+func NewControlServerSideSortingResult(pkt *ber.Packet) (*ControlServerSideSortingResult, error) {
+ control := new(ControlServerSideSortingResult)
+
+ if pkt == nil || len(pkt.Children) == 0 {
+ // This is currently not compliant with the ServerSideSorting RFC (see https://datatracker.ietf.org/doc/html/rfc2891#section-1.2).
+ // but it's necessary because there seems to be a bug in the implementation of the popular OpenLDAP server.
+ //
+ // See: https://github.com/go-ldap/ldap/pull/546
+ return control, nil
+ }
+
+ codeInt, err := ber.ParseInt64(pkt.Children[0].Data.Bytes())
+ if err != nil {
+ return nil, err
+ }
+
+ if err = ControlServerSideSortingCode(codeInt).Valid(); err != nil {
+ return nil, err
+ }
+
+ return control, nil
+}
+
+type ControlServerSideSortingResult struct {
+ Criticality bool
+
+ Result ControlServerSideSortingCode
+
+ // Not populated for now. I can't get openldap to send me this value, so I think this is specific to other directory servers
+ // AttributeType string
+}
+
+func (control *ControlServerSideSortingResult) GetControlType() string {
+ return ControlTypeServerSideSortingResult
+}
+
+func (c *ControlServerSideSortingResult) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "SortResult sequence")
+ sortResult := ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, int64(c.Result), "SortResult")
+ packet.AppendChild(sortResult)
+
+ return packet
+}
+
+func (c *ControlServerSideSortingResult) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality:%t ResultCode:%+v",
+ "Server Side Sorting Result",
+ c.GetControlType(),
+ c.Criticality,
+ c.Result,
+ )
+}
+
+// Mode for ControlTypeSyncRequest
+type ControlSyncRequestMode int64
+
+const (
+ SyncRequestModeRefreshOnly ControlSyncRequestMode = 1
+ SyncRequestModeRefreshAndPersist ControlSyncRequestMode = 3
+)
+
+// ControlSyncRequest implements the Sync Request Control described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncRequest struct {
+ Criticality bool
+ Mode ControlSyncRequestMode
+ Cookie []byte
+ ReloadHint bool
+}
+
+func NewControlSyncRequest(
+ mode ControlSyncRequestMode, cookie []byte, reloadHint bool,
+) *ControlSyncRequest {
+ return &ControlSyncRequest{
+ Criticality: true,
+ Mode: mode,
+ Cookie: cookie,
+ ReloadHint: reloadHint,
+ }
+}
+
+// GetControlType returns the OID
+func (c *ControlSyncRequest) GetControlType() string {
+ return ControlTypeSyncRequest
+}
+
+// Encode encodes the control
+func (c *ControlSyncRequest) Encode() *ber.Packet {
+ _mode := int64(c.Mode)
+ mode := ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, _mode, "Mode")
+ var cookie *ber.Packet
+ if len(c.Cookie) > 0 {
+ cookie = ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
+ cookie.Value = c.Cookie
+ cookie.Data.Write(c.Cookie)
+ }
+ reloadHint := ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.ReloadHint, "Reload Hint")
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeSyncRequest, "Control Type ("+ControlTypeMap[ControlTypeSyncRequest]+")"))
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+
+ val := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Sync Request)")
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Sync Request Value")
+ seq.AppendChild(mode)
+ if cookie != nil {
+ seq.AppendChild(cookie)
+ }
+ seq.AppendChild(reloadHint)
+ val.AppendChild(seq)
+
+ packet.AppendChild(val)
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlSyncRequest) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Mode: %d Cookie: %s ReloadHint: %t",
+ ControlTypeMap[ControlTypeSyncRequest],
+ ControlTypeSyncRequest,
+ c.Criticality,
+ c.Mode,
+ string(c.Cookie),
+ c.ReloadHint,
+ )
+}
+
+// State for ControlSyncState
+type ControlSyncStateState int64
+
+const (
+ SyncStatePresent ControlSyncStateState = 0
+ SyncStateAdd ControlSyncStateState = 1
+ SyncStateModify ControlSyncStateState = 2
+ SyncStateDelete ControlSyncStateState = 3
+)
+
+// ControlSyncState implements the Sync State Control described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncState struct {
+ Criticality bool
+ State ControlSyncStateState
+ EntryUUID uuid.UUID
+ Cookie []byte
+}
+
+func NewControlSyncState(pkt *ber.Packet) (*ControlSyncState, error) {
+ var (
+ state ControlSyncStateState
+ entryUUID uuid.UUID
+ cookie []byte
+ err error
+ )
+ switch len(pkt.Children) {
+ case 0, 1:
+ return nil, fmt.Errorf("at least two children are required: %d", len(pkt.Children))
+ case 2:
+ state = ControlSyncStateState(pkt.Children[0].Value.(int64))
+ entryUUID, err = uuid.FromBytes(pkt.Children[1].ByteValue)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode uuid: %w", err)
+ }
+ case 3:
+ state = ControlSyncStateState(pkt.Children[0].Value.(int64))
+ entryUUID, err = uuid.FromBytes(pkt.Children[1].ByteValue)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode uuid: %w", err)
+ }
+ cookie = pkt.Children[2].ByteValue
+ }
+ return &ControlSyncState{
+ Criticality: false,
+ State: state,
+ EntryUUID: entryUUID,
+ Cookie: cookie,
+ }, nil
+}
+
+// GetControlType returns the OID
+func (c *ControlSyncState) GetControlType() string {
+ return ControlTypeSyncState
+}
+
+// Encode encodes the control
+func (c *ControlSyncState) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlSyncState) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t State: %d EntryUUID: %s Cookie: %s",
+ ControlTypeMap[ControlTypeSyncState],
+ ControlTypeSyncState,
+ c.Criticality,
+ c.State,
+ c.EntryUUID.String(),
+ string(c.Cookie),
+ )
+}
+
+// ControlSyncDone implements the Sync Done Control described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncDone struct {
+ Criticality bool
+ Cookie []byte
+ RefreshDeletes bool
+}
+
+func NewControlSyncDone(pkt *ber.Packet) (*ControlSyncDone, error) {
+ var (
+ cookie []byte
+ refreshDeletes bool
+ )
+ switch len(pkt.Children) {
+ case 0:
+ // have nothing to do
+ case 1:
+ cookie = pkt.Children[0].ByteValue
+ case 2:
+ cookie = pkt.Children[0].ByteValue
+ refreshDeletes = pkt.Children[1].Value.(bool)
+ }
+ return &ControlSyncDone{
+ Criticality: false,
+ Cookie: cookie,
+ RefreshDeletes: refreshDeletes,
+ }, nil
+}
+
+// GetControlType returns the OID
+func (c *ControlSyncDone) GetControlType() string {
+ return ControlTypeSyncDone
+}
+
+// Encode encodes the control
+func (c *ControlSyncDone) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlSyncDone) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Cookie: %s RefreshDeletes: %t",
+ ControlTypeMap[ControlTypeSyncDone],
+ ControlTypeSyncDone,
+ c.Criticality,
+ string(c.Cookie),
+ c.RefreshDeletes,
+ )
+}
+
+// Tag For ControlSyncInfo
+type ControlSyncInfoValue uint64
+
+const (
+ SyncInfoNewcookie ControlSyncInfoValue = 0
+ SyncInfoRefreshDelete ControlSyncInfoValue = 1
+ SyncInfoRefreshPresent ControlSyncInfoValue = 2
+ SyncInfoSyncIdSet ControlSyncInfoValue = 3
+)
+
+// ControlSyncInfoNewCookie implements a part of syncInfoValue described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncInfoNewCookie struct {
+ Cookie []byte
+}
+
+// String returns a human-readable description
+func (c *ControlSyncInfoNewCookie) String() string {
+ return fmt.Sprintf(
+ "NewCookie[Cookie: %s]",
+ string(c.Cookie),
+ )
+}
+
+// ControlSyncInfoRefreshDelete implements a part of syncInfoValue described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncInfoRefreshDelete struct {
+ Cookie []byte
+ RefreshDone bool
+}
+
+// String returns a human-readable description
+func (c *ControlSyncInfoRefreshDelete) String() string {
+ return fmt.Sprintf(
+ "RefreshDelete[Cookie: %s RefreshDone: %t]",
+ string(c.Cookie),
+ c.RefreshDone,
+ )
+}
+
+// ControlSyncInfoRefreshPresent implements a part of syncInfoValue described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncInfoRefreshPresent struct {
+ Cookie []byte
+ RefreshDone bool
+}
+
+// String returns a human-readable description
+func (c *ControlSyncInfoRefreshPresent) String() string {
+ return fmt.Sprintf(
+ "RefreshPresent[Cookie: %s RefreshDone: %t]",
+ string(c.Cookie),
+ c.RefreshDone,
+ )
+}
+
+// ControlSyncInfoSyncIdSet implements a part of syncInfoValue described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncInfoSyncIdSet struct {
+ Cookie []byte
+ RefreshDeletes bool
+ SyncUUIDs []uuid.UUID
+}
+
+// String returns a human-readable description
+func (c *ControlSyncInfoSyncIdSet) String() string {
+ return fmt.Sprintf(
+ "SyncIdSet[Cookie: %s RefreshDeletes: %t SyncUUIDs: %v]",
+ string(c.Cookie),
+ c.RefreshDeletes,
+ c.SyncUUIDs,
+ )
+}
+
+// ControlSyncInfo implements the Sync Info Control described in https://www.ietf.org/rfc/rfc4533.txt
+type ControlSyncInfo struct {
+ Criticality bool
+ Value ControlSyncInfoValue
+ NewCookie *ControlSyncInfoNewCookie
+ RefreshDelete *ControlSyncInfoRefreshDelete
+ RefreshPresent *ControlSyncInfoRefreshPresent
+ SyncIdSet *ControlSyncInfoSyncIdSet
+}
+
+func NewControlSyncInfo(pkt *ber.Packet) (*ControlSyncInfo, error) {
+ var (
+ cookie []byte
+ refreshDone = true
+ refreshDeletes bool
+ syncUUIDs []uuid.UUID
+ )
+ c := &ControlSyncInfo{Criticality: false}
+ switch ControlSyncInfoValue(pkt.Identifier.Tag) {
+ case SyncInfoNewcookie:
+ c.Value = SyncInfoNewcookie
+ c.NewCookie = &ControlSyncInfoNewCookie{
+ Cookie: pkt.ByteValue,
+ }
+ case SyncInfoRefreshDelete:
+ c.Value = SyncInfoRefreshDelete
+ switch len(pkt.Children) {
+ case 0:
+ // have nothing to do
+ case 1:
+ cookie = pkt.Children[0].ByteValue
+ case 2:
+ cookie = pkt.Children[0].ByteValue
+ refreshDone = pkt.Children[1].Value.(bool)
+ }
+ c.RefreshDelete = &ControlSyncInfoRefreshDelete{
+ Cookie: cookie,
+ RefreshDone: refreshDone,
+ }
+ case SyncInfoRefreshPresent:
+ c.Value = SyncInfoRefreshPresent
+ switch len(pkt.Children) {
+ case 0:
+ // have nothing to do
+ case 1:
+ cookie = pkt.Children[0].ByteValue
+ case 2:
+ cookie = pkt.Children[0].ByteValue
+ refreshDone = pkt.Children[1].Value.(bool)
+ }
+ c.RefreshPresent = &ControlSyncInfoRefreshPresent{
+ Cookie: cookie,
+ RefreshDone: refreshDone,
+ }
+ case SyncInfoSyncIdSet:
+ c.Value = SyncInfoSyncIdSet
+ switch len(pkt.Children) {
+ case 0:
+ // have nothing to do
+ case 1:
+ cookie = pkt.Children[0].ByteValue
+ case 2:
+ cookie = pkt.Children[0].ByteValue
+ refreshDeletes = pkt.Children[1].Value.(bool)
+ case 3:
+ cookie = pkt.Children[0].ByteValue
+ refreshDeletes = pkt.Children[1].Value.(bool)
+ syncUUIDs = make([]uuid.UUID, 0, len(pkt.Children[2].Children))
+ for _, child := range pkt.Children[2].Children {
+ u, err := uuid.FromBytes(child.ByteValue)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode uuid: %w", err)
+ }
+ syncUUIDs = append(syncUUIDs, u)
+ }
+ }
+ c.SyncIdSet = &ControlSyncInfoSyncIdSet{
+ Cookie: cookie,
+ RefreshDeletes: refreshDeletes,
+ SyncUUIDs: syncUUIDs,
+ }
+ default:
+ return nil, fmt.Errorf("unknown sync info value: %d", pkt.Identifier.Tag)
+ }
+ return c, nil
+}
+
+// GetControlType returns the OID
+func (c *ControlSyncInfo) GetControlType() string {
+ return ControlTypeSyncInfo
+}
+
+// Encode encodes the control
+func (c *ControlSyncInfo) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlSyncInfo) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Value: %d %s %s %s %s",
+ ControlTypeMap[ControlTypeSyncInfo],
+ ControlTypeSyncInfo,
+ c.Criticality,
+ c.Value,
+ c.NewCookie,
+ c.RefreshDelete,
+ c.RefreshPresent,
+ c.SyncIdSet,
+ )
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/debug.go b/vendor/github.com/go-ldap/ldap/v3/debug.go
new file mode 100644
index 0000000..6f89b4a
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/debug.go
@@ -0,0 +1,28 @@
+package ldap
+
+import (
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// debugging type
+// - has a Printf method to write the debug output
+type debugging bool
+
+// Enable controls debugging mode.
+func (debug *debugging) Enable(b bool) {
+ *debug = debugging(b)
+}
+
+// Printf writes debug output.
+func (debug debugging) Printf(format string, args ...interface{}) {
+ if debug {
+ logger.Printf(format, args...)
+ }
+}
+
+// PrintPacket dumps a packet.
+func (debug debugging) PrintPacket(packet *ber.Packet) {
+ if debug {
+ ber.WritePacket(logger.Writer(), packet)
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/del.go b/vendor/github.com/go-ldap/ldap/v3/del.go
new file mode 100644
index 0000000..6230695
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/del.go
@@ -0,0 +1,59 @@
+package ldap
+
+import (
+ "fmt"
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// DelRequest implements an LDAP deletion request
+type DelRequest struct {
+ // DN is the name of the directory entry to delete
+ DN string
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+func (req *DelRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, req.DN, "Del Request")
+ pkt.Data.Write([]byte(req.DN))
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// NewDelRequest creates a delete request for the given DN and controls
+func NewDelRequest(DN string, Controls []Control) *DelRequest {
+ return &DelRequest{
+ DN: DN,
+ Controls: Controls,
+ }
+}
+
+// Del executes the given delete request
+func (l *Conn) Del(delRequest *DelRequest) error {
+ msgCtx, err := l.doRequest(delRequest)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return err
+ }
+
+ if packet.Children[1].Tag == ApplicationDelResponse {
+ err := GetLDAPError(packet)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
+ }
+
+ return nil
+}
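A hedged usage sketch for Del, combined with the subtree delete control defined in control.go; the DN is a placeholder and the control only has an effect on servers implementing the draft-armijo tree delete extension.

package main

import (
	"log"

	ldap "github.com/go-ldap/ldap/v3"
)

// deleteSubtree removes an entry and, where the server supports it, everything beneath it.
func deleteSubtree(conn *ldap.Conn, dn string) {
	req := ldap.NewDelRequest(dn, []ldap.Control{ldap.NewControlSubtreeDelete()})
	if err := conn.Del(req); err != nil {
		log.Fatalf("delete %s: %v", dn, err)
	}
}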
diff --git a/vendor/github.com/go-ldap/ldap/v3/dn.go b/vendor/github.com/go-ldap/ldap/v3/dn.go
new file mode 100644
index 0000000..6520b8e
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/dn.go
@@ -0,0 +1,468 @@
+package ldap
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ ber "github.com/go-asn1-ber/asn1-ber"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
+type AttributeTypeAndValue struct {
+ // Type is the attribute type
+ Type string
+ // Value is the attribute value
+ Value string
+}
+
+func (a *AttributeTypeAndValue) setType(str string) error {
+ result, err := decodeString(str)
+ if err != nil {
+ return err
+ }
+ a.Type = result
+
+ return nil
+}
+
+func (a *AttributeTypeAndValue) setValue(s string) error {
+ // https://www.ietf.org/rfc/rfc4514.html#section-2.4
+ // If the AttributeType is of the dotted-decimal form, the
+ // AttributeValue is represented by a number sign ('#' U+0023)
+ // character followed by the hexadecimal encoding of each of the octets
+ // of the BER encoding of the X.500 AttributeValue.
+ if len(s) > 0 && s[0] == '#' {
+ decodedString, err := decodeEncodedString(s[1:])
+ if err != nil {
+ return err
+ }
+
+ a.Value = decodedString
+ return nil
+ } else {
+ decodedString, err := decodeString(s)
+ if err != nil {
+ return err
+ }
+
+ a.Value = decodedString
+ return nil
+ }
+}
+
+// String returns a normalized string representation of this attribute type and
+// value pair which is the lowercase join of the Type and Value with a "=".
+func (a *AttributeTypeAndValue) String() string {
+ return encodeString(foldString(a.Type), false) + "=" + encodeString(a.Value, true)
+}
+
+// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
+type RelativeDN struct {
+ Attributes []*AttributeTypeAndValue
+}
+
+// String returns a normalized string representation of this relative DN which
+// is the join of all attributes (sorted in increasing order) with a "+".
+func (r *RelativeDN) String() string {
+ attrs := make([]string, len(r.Attributes))
+ for i := range r.Attributes {
+ attrs[i] = r.Attributes[i].String()
+ }
+ sort.Strings(attrs)
+ return strings.Join(attrs, "+")
+}
+
+// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
+type DN struct {
+ RDNs []*RelativeDN
+}
+
+// String returns a normalized string representation of this DN which is the
+// join of all relative DNs with a ",".
+func (d *DN) String() string {
+ rdns := make([]string, len(d.RDNs))
+ for i := range d.RDNs {
+ rdns[i] = d.RDNs[i].String()
+ }
+ return strings.Join(rdns, ",")
+}
+
+func stripLeadingAndTrailingSpaces(inVal string) string {
+ noSpaces := strings.Trim(inVal, " ")
+
+ // Re-add the trailing space if it was an escaped space
+ if len(noSpaces) > 0 && noSpaces[len(noSpaces)-1] == '\\' && inVal[len(inVal)-1] == ' ' {
+ noSpaces = noSpaces + " "
+ }
+
+ return noSpaces
+}
+
+// Remove leading and trailing spaces from the attribute type and value
+// and unescape any escaped characters in these fields
+//
+// decodeString is based on https://github.com/inteon/cert-manager/blob/ed280d28cd02b262c5db46054d88e70ab518299c/pkg/util/pki/internal/dn.go#L170
+func decodeString(str string) (string, error) {
+ s := []rune(stripLeadingAndTrailingSpaces(str))
+
+ builder := strings.Builder{}
+ for i := 0; i < len(s); i++ {
+ char := s[i]
+
+ // If the character is not an escape character, just add it to the
+ // builder and continue
+ if char != '\\' {
+ builder.WriteRune(char)
+ continue
+ }
+
+ // If the escape character is the last character, it's a corrupted
+ // escaped character
+ if i+1 >= len(s) {
+ return "", fmt.Errorf("got corrupted escaped character: '%s'", string(s))
+ }
+
+ // If the escaped character is a special character, just add it to
+ // the builder and continue
+ switch s[i+1] {
+ case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
+ builder.WriteRune(s[i+1])
+ i++
+ continue
+ }
+
+ // If the escaped character is not a special character, it should
+ // be a hex-encoded character of the form \XX. If it's not at least
+ // two characters long, it's a corrupted escaped character
+ if i+2 >= len(s) {
+ return "", errors.New("failed to decode escaped character: encoding/hex: invalid byte: " + string(s[i+1]))
+ }
+
+ // Get the runes for the two characters after the escape character
+ // and convert them to a byte slice
+ xx := []byte(string(s[i+1 : i+3]))
+
+ // If the two runes are not hex characters and result in more than
+ // two bytes when converted to a byte slice, it's a corrupted
+ // escaped character
+ if len(xx) != 2 {
+ return "", errors.New("failed to decode escaped character: invalid byte: " + string(xx))
+ }
+
+ // Decode the hex-encoded character and add it to the builder
+ dst := []byte{0}
+ if n, err := hex.Decode(dst, xx); err != nil {
+ return "", errors.New("failed to decode escaped character: " + err.Error())
+ } else if n != 1 {
+ return "", fmt.Errorf("failed to decode escaped character: encoding/hex: expected 1 byte when un-escaping, got %d", n)
+ }
+
+ builder.WriteByte(dst[0])
+ i += 2
+ }
+
+ return builder.String(), nil
+}
+
+// Escape a string according to RFC 4514
+func encodeString(value string, isValue bool) string {
+ builder := strings.Builder{}
+
+ escapeChar := func(c byte) {
+ builder.WriteByte('\\')
+ builder.WriteByte(c)
+ }
+
+ escapeHex := func(c byte) {
+ builder.WriteByte('\\')
+ builder.WriteString(hex.EncodeToString([]byte{c}))
+ }
+
+ // Loop through each byte and escape as necessary.
+ // Runes that take up more than one byte are escaped
+ // byte by byte (since all bytes of a multi-byte rune are non-ASCII).
+ for i := 0; i < len(value); i++ {
+ char := value[i]
+ if i == 0 && (char == ' ' || char == '#') {
+ // Special case leading space or number sign.
+ escapeChar(char)
+ continue
+ }
+ if i == len(value)-1 && char == ' ' {
+ // Special case trailing space.
+ escapeChar(char)
+ continue
+ }
+
+ switch char {
+ case '"', '+', ',', ';', '<', '>', '\\':
+ // Each of these special characters must be escaped.
+ escapeChar(char)
+ continue
+ }
+
+ if !isValue && char == '=' {
+ // Equal signs have to be escaped only in the type part of
+ // the attribute type and value pair.
+ escapeChar(char)
+ continue
+ }
+
+ if char < ' ' || char > '~' {
+ // All special character escapes are handled first
+ // above. All bytes less than ASCII SPACE and all bytes
+ // greater than ASCII TILDE must be hex-escaped.
+ escapeHex(char)
+ continue
+ }
+
+ // Any other character does not require escaping.
+ builder.WriteByte(char)
+ }
+
+ return builder.String()
+}
+
+func decodeEncodedString(str string) (string, error) {
+ decoded, err := hex.DecodeString(str)
+ if err != nil {
+ return "", fmt.Errorf("failed to decode BER encoding: %w", err)
+ }
+
+ packet, err := ber.DecodePacketErr(decoded)
+ if err != nil {
+ return "", fmt.Errorf("failed to decode BER encoding: %w", err)
+ }
+
+ return packet.Data.String(), nil
+}
+
+// ParseDN returns a distinguishedName or an error.
+// The function respects https://tools.ietf.org/html/rfc4514
+func ParseDN(str string) (*DN, error) {
+ var dn = &DN{RDNs: make([]*RelativeDN, 0)}
+ if strings.TrimSpace(str) == "" {
+ return dn, nil
+ }
+
+ var (
+ rdn = &RelativeDN{}
+ attr = &AttributeTypeAndValue{}
+ escaping bool
+ startPos int
+ appendAttributesToRDN = func(end bool) {
+ rdn.Attributes = append(rdn.Attributes, attr)
+ attr = &AttributeTypeAndValue{}
+ if end {
+ dn.RDNs = append(dn.RDNs, rdn)
+ rdn = &RelativeDN{}
+ }
+ }
+ )
+
+ // Loop through each character in the string and
+ // build up the attribute type and value pairs.
+ // We only check for ASCII characters here, which
+ // allows us to iterate over the string byte by byte.
+ for i := 0; i < len(str); i++ {
+ char := str[i]
+ switch {
+ case escaping:
+ escaping = false
+ case char == '\\':
+ escaping = true
+ case char == '=' && len(attr.Type) == 0:
+ if err := attr.setType(str[startPos:i]); err != nil {
+ return nil, err
+ }
+ startPos = i + 1
+ case char == ',' || char == '+' || char == ';':
+ if len(attr.Type) == 0 {
+ return dn, errors.New("incomplete type, value pair")
+ }
+ if err := attr.setValue(str[startPos:i]); err != nil {
+ return nil, err
+ }
+
+ startPos = i + 1
+ last := char == ',' || char == ';'
+ appendAttributesToRDN(last)
+ }
+ }
+
+ if len(attr.Type) == 0 {
+ return dn, errors.New("DN ended with incomplete type, value pair")
+ }
+
+ if err := attr.setValue(str[startPos:]); err != nil {
+ return dn, err
+ }
+ appendAttributesToRDN(true)
+
+ return dn, nil
+}
+
+// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+func (d *DN) Equal(other *DN) bool {
+ if len(d.RDNs) != len(other.RDNs) {
+ return false
+ }
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(other.RDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
+func (d *DN) AncestorOf(other *DN) bool {
+ if len(d.RDNs) >= len(other.RDNs) {
+ return false
+ }
+ // Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+ otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(otherRDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
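+
+// Usage sketch (illustrative only; it assumes this package is imported as
+// "ldap" and shows how ParseDN pairs with AncestorOf and Equal):
+//
+//	parent, _ := ldap.ParseDN("ou=widgets,o=acme.com")
+//	child, _ := ldap.ParseDN("ou=sprockets,ou=widgets,o=acme.com")
+//	parent.AncestorOf(child) // true
+//	parent.Equal(child)      // false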
+
+// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
+// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
+// The order of attributes is not significant.
+// Case of attribute types is not significant.
+func (r *RelativeDN) Equal(other *RelativeDN) bool {
+ if len(r.Attributes) != len(other.Attributes) {
+ return false
+ }
+ return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
+ for _, attr := range attrs {
+ found := false
+ for _, myattr := range r.Attributes {
+ if myattr.Equal(attr) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type is not significant
+func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
+ return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
+}
+
+// EqualFold returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+// Case of the attribute type and value is not significant
+func (d *DN) EqualFold(other *DN) bool {
+ if len(d.RDNs) != len(other.RDNs) {
+ return false
+ }
+ for i := range d.RDNs {
+ if !d.RDNs[i].EqualFold(other.RDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// AncestorOfFold returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// Case of the attribute type and value is not significant
+func (d *DN) AncestorOfFold(other *DN) bool {
+ if len(d.RDNs) >= len(other.RDNs) {
+ return false
+ }
+ // Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+ otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+ for i := range d.RDNs {
+ if !d.RDNs[i].EqualFold(otherRDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFold returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Case of the attribute type is not significant
+func (r *RelativeDN) EqualFold(other *RelativeDN) bool {
+ if len(r.Attributes) != len(other.Attributes) {
+ return false
+ }
+ return r.hasAllAttributesFold(other.Attributes) && other.hasAllAttributesFold(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributesFold(attrs []*AttributeTypeAndValue) bool {
+ for _, attr := range attrs {
+ found := false
+ for _, myattr := range r.Attributes {
+ if myattr.EqualFold(attr) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFold returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type and value is not significant
+func (a *AttributeTypeAndValue) EqualFold(other *AttributeTypeAndValue) bool {
+ return strings.EqualFold(a.Type, other.Type) && strings.EqualFold(a.Value, other.Value)
+}
+
+// foldString returns a folded string such that foldString(x) == foldString(y)
+// holds exactly when strings.EqualFold(x, y) is true.
+// Based on https://go.dev/src/encoding/json/fold.go
+func foldString(s string) string {
+ builder := strings.Builder{}
+ for _, char := range s {
+ // Handle single-byte ASCII.
+ if char < utf8.RuneSelf {
+ if 'A' <= char && char <= 'Z' {
+ char += 'a' - 'A'
+ }
+ builder.WriteRune(char)
+ continue
+ }
+
+ builder.WriteRune(foldRune(char))
+ }
+ return builder.String()
+}
+
+// foldRune returns the smallest rune in the fold set that contains r.
+func foldRune(r rune) rune {
+ for {
+ r2 := unicode.SimpleFold(r)
+ if r2 <= r {
+ return r
+ }
+ r = r2
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/doc.go b/vendor/github.com/go-ldap/ldap/v3/doc.go
new file mode 100644
index 0000000..f20d39b
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/doc.go
@@ -0,0 +1,4 @@
+/*
+Package ldap provides basic LDAP v3 functionality.
+*/
+package ldap
diff --git a/vendor/github.com/go-ldap/ldap/v3/error.go b/vendor/github.com/go-ldap/ldap/v3/error.go
new file mode 100644
index 0000000..0014ffe
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/error.go
@@ -0,0 +1,262 @@
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// LDAP Result Codes
+const (
+ LDAPResultSuccess = 0
+ LDAPResultOperationsError = 1
+ LDAPResultProtocolError = 2
+ LDAPResultTimeLimitExceeded = 3
+ LDAPResultSizeLimitExceeded = 4
+ LDAPResultCompareFalse = 5
+ LDAPResultCompareTrue = 6
+ LDAPResultAuthMethodNotSupported = 7
+ LDAPResultStrongAuthRequired = 8
+ LDAPResultReferral = 10
+ LDAPResultAdminLimitExceeded = 11
+ LDAPResultUnavailableCriticalExtension = 12
+ LDAPResultConfidentialityRequired = 13
+ LDAPResultSaslBindInProgress = 14
+ LDAPResultNoSuchAttribute = 16
+ LDAPResultUndefinedAttributeType = 17
+ LDAPResultInappropriateMatching = 18
+ LDAPResultConstraintViolation = 19
+ LDAPResultAttributeOrValueExists = 20
+ LDAPResultInvalidAttributeSyntax = 21
+ LDAPResultNoSuchObject = 32
+ LDAPResultAliasProblem = 33
+ LDAPResultInvalidDNSyntax = 34
+ LDAPResultIsLeaf = 35
+ LDAPResultAliasDereferencingProblem = 36
+ LDAPResultInappropriateAuthentication = 48
+ LDAPResultInvalidCredentials = 49
+ LDAPResultInsufficientAccessRights = 50
+ LDAPResultBusy = 51
+ LDAPResultUnavailable = 52
+ LDAPResultUnwillingToPerform = 53
+ LDAPResultLoopDetect = 54
+ LDAPResultSortControlMissing = 60
+ LDAPResultOffsetRangeError = 61
+ LDAPResultNamingViolation = 64
+ LDAPResultObjectClassViolation = 65
+ LDAPResultNotAllowedOnNonLeaf = 66
+ LDAPResultNotAllowedOnRDN = 67
+ LDAPResultEntryAlreadyExists = 68
+ LDAPResultObjectClassModsProhibited = 69
+ LDAPResultResultsTooLarge = 70
+ LDAPResultAffectsMultipleDSAs = 71
+ LDAPResultVirtualListViewErrorOrControlError = 76
+ LDAPResultOther = 80
+ LDAPResultServerDown = 81
+ LDAPResultLocalError = 82
+ LDAPResultEncodingError = 83
+ LDAPResultDecodingError = 84
+ LDAPResultTimeout = 85
+ LDAPResultAuthUnknown = 86
+ LDAPResultFilterError = 87
+ LDAPResultUserCanceled = 88
+ LDAPResultParamError = 89
+ LDAPResultNoMemory = 90
+ LDAPResultConnectError = 91
+ LDAPResultNotSupported = 92
+ LDAPResultControlNotFound = 93
+ LDAPResultNoResultsReturned = 94
+ LDAPResultMoreResultsToReturn = 95
+ LDAPResultClientLoop = 96
+ LDAPResultReferralLimitExceeded = 97
+ LDAPResultInvalidResponse = 100
+ LDAPResultAmbiguousResponse = 101
+ LDAPResultTLSNotSupported = 112
+ LDAPResultIntermediateResponse = 113
+ LDAPResultUnknownType = 114
+ LDAPResultCanceled = 118
+ LDAPResultNoSuchOperation = 119
+ LDAPResultTooLate = 120
+ LDAPResultCannotCancel = 121
+ LDAPResultAssertionFailed = 122
+ LDAPResultAuthorizationDenied = 123
+ LDAPResultSyncRefreshRequired = 4096
+
+ ErrorNetwork = 200
+ ErrorFilterCompile = 201
+ ErrorFilterDecompile = 202
+ ErrorDebugging = 203
+ ErrorUnexpectedMessage = 204
+ ErrorUnexpectedResponse = 205
+ ErrorEmptyPassword = 206
+)
+
+// LDAPResultCodeMap contains string descriptions for LDAP error codes
+var LDAPResultCodeMap = map[uint16]string{
+ LDAPResultSuccess: "Success",
+ LDAPResultOperationsError: "Operations Error",
+ LDAPResultProtocolError: "Protocol Error",
+ LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
+ LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
+ LDAPResultCompareFalse: "Compare False",
+ LDAPResultCompareTrue: "Compare True",
+ LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
+ LDAPResultStrongAuthRequired: "Strong Auth Required",
+ LDAPResultReferral: "Referral",
+ LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
+ LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
+ LDAPResultConfidentialityRequired: "Confidentiality Required",
+ LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
+ LDAPResultNoSuchAttribute: "No Such Attribute",
+ LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
+ LDAPResultInappropriateMatching: "Inappropriate Matching",
+ LDAPResultConstraintViolation: "Constraint Violation",
+ LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
+ LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
+ LDAPResultNoSuchObject: "No Such Object",
+ LDAPResultAliasProblem: "Alias Problem",
+ LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
+ LDAPResultIsLeaf: "Is Leaf",
+ LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
+ LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
+ LDAPResultInvalidCredentials: "Invalid Credentials",
+ LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
+ LDAPResultBusy: "Busy",
+ LDAPResultUnavailable: "Unavailable",
+ LDAPResultUnwillingToPerform: "Unwilling To Perform",
+ LDAPResultLoopDetect: "Loop Detect",
+ LDAPResultSortControlMissing: "Sort Control Missing",
+ LDAPResultOffsetRangeError: "Result Offset Range Error",
+ LDAPResultNamingViolation: "Naming Violation",
+ LDAPResultObjectClassViolation: "Object Class Violation",
+ LDAPResultResultsTooLarge: "Results Too Large",
+ LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
+ LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
+ LDAPResultEntryAlreadyExists: "Entry Already Exists",
+ LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
+ LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
+ LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view",
+ LDAPResultOther: "Other",
+ LDAPResultServerDown: "Cannot establish a connection",
+ LDAPResultLocalError: "An error occurred",
+ LDAPResultEncodingError: "LDAP encountered an error while encoding",
+ LDAPResultDecodingError: "LDAP encountered an error while decoding",
+ LDAPResultTimeout: "LDAP timeout while waiting for a response from the server",
+ LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown",
+ LDAPResultFilterError: "An error occurred while encoding the given search filter",
+ LDAPResultUserCanceled: "The user canceled the operation",
+ LDAPResultParamError: "An invalid parameter was specified",
+ LDAPResultNoMemory: "Out of memory error",
+ LDAPResultConnectError: "A connection to the server could not be established",
+ LDAPResultNotSupported: "An attempt has been made to use a feature not supported by LDAP",
+ LDAPResultControlNotFound: "The controls required to perform the requested operation were not found",
+ LDAPResultNoResultsReturned: "No results were returned from the server",
+ LDAPResultMoreResultsToReturn: "There are more results in the chain of results",
+ LDAPResultClientLoop: "A loop has been detected. For example when following referrals",
+ LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded",
+ LDAPResultCanceled: "Operation was canceled",
+ LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation",
+ LDAPResultTooLate: "Too late to cancel the outstanding operation",
+ LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed",
+ LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed",
+ LDAPResultSyncRefreshRequired: "Refresh Required",
+ LDAPResultInvalidResponse: "Invalid Response",
+ LDAPResultAmbiguousResponse: "Ambiguous Response",
+ LDAPResultTLSNotSupported: "Tls Not Supported",
+ LDAPResultIntermediateResponse: "Intermediate Response",
+ LDAPResultUnknownType: "Unknown Type",
+ LDAPResultAuthorizationDenied: "Authorization Denied",
+
+ ErrorNetwork: "Network Error",
+ ErrorFilterCompile: "Filter Compile Error",
+ ErrorFilterDecompile: "Filter Decompile Error",
+ ErrorDebugging: "Debugging Error",
+ ErrorUnexpectedMessage: "Unexpected Message",
+ ErrorUnexpectedResponse: "Unexpected Response",
+ ErrorEmptyPassword: "Empty password not allowed by the client",
+}
+
+// Error holds LDAP error information
+type Error struct {
+ // Err is the underlying error
+ Err error
+ // ResultCode is the LDAP error code
+ ResultCode uint16
+ // MatchedDN is the matchedDN returned if any
+ MatchedDN string
+ // Packet is the returned packet if any
+ Packet *ber.Packet
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+// GetLDAPError creates an Error out of a BER packet representing an LDAPResult.
+// The return value is an error object that can be cast to an *Error structure.
+// This function returns nil if the resultCode in the LDAPResult sequence is success (0).
+func GetLDAPError(packet *ber.Packet) error {
+ if packet == nil {
+ return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")}
+ }
+
+ if len(packet.Children) >= 2 {
+ response := packet.Children[1]
+ if response == nil {
+ return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet"), Packet: packet}
+ }
+ if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+ if ber.Type(response.Children[0].Tag) == ber.Type(ber.TagInteger) || ber.Type(response.Children[0].Tag) == ber.Type(ber.TagEnumerated) {
+ resultCode := uint16(response.Children[0].Value.(int64))
+ if resultCode == 0 { // No error
+ return nil
+ }
+
+ if ber.Type(response.Children[1].Tag) == ber.Type(ber.TagOctetString) &&
+ ber.Type(response.Children[2].Tag) == ber.Type(ber.TagOctetString) {
+ return &Error{
+ ResultCode: resultCode,
+ MatchedDN: response.Children[1].Value.(string),
+ Err: fmt.Errorf("%v", response.Children[2].Value),
+ Packet: packet,
+ }
+ }
+ }
+ }
+ }
+
+ return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format"), Packet: packet}
+}
+
+// NewError creates an LDAP error with the given code and underlying error
+func NewError(resultCode uint16, err error) error {
+ return &Error{ResultCode: resultCode, Err: err}
+}
+
+// IsErrorAnyOf returns true if the given error is an LDAP error with any one of the given result codes
+func IsErrorAnyOf(err error, codes ...uint16) bool {
+ if err == nil {
+ return false
+ }
+
+ var serverError *Error
+ if !errors.As(err, &serverError) {
+ return false
+ }
+
+ for _, code := range codes {
+ if serverError.ResultCode == code {
+ return true
+ }
+ }
+
+ return false
+}
+
+// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
+func IsErrorWithCode(err error, desiredResultCode uint16) bool {
+ return IsErrorAnyOf(err, desiredResultCode)
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/extended.go b/vendor/github.com/go-ldap/ldap/v3/extended.go
new file mode 100644
index 0000000..e71d982
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/extended.go
@@ -0,0 +1,100 @@
+package ldap
+
+import (
+ "fmt"
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// ExtendedRequest represents an extended request to send to the server
+// See: https://www.rfc-editor.org/rfc/rfc4511#section-4.12
+type ExtendedRequest struct {
+ // ExtendedRequest ::= [APPLICATION 23] SEQUENCE {
+ // requestName [0] LDAPOID,
+ // requestValue [1] OCTET STRING OPTIONAL }
+
+ Name string
+ Value *ber.Packet
+ Controls []Control
+}
+
+// NewExtendedRequest returns a new ExtendedRequest. The value can be
+// nil depending on the type of request
+func NewExtendedRequest(name string, value *ber.Packet) *ExtendedRequest {
+ return &ExtendedRequest{
+ Name: name,
+ Value: value,
+ }
+}
+
+func (er ExtendedRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Extended Request")
+ pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, ber.TagEOC, er.Name, "Extended Request Name"))
+ if er.Value != nil {
+ pkt.AppendChild(er.Value)
+ }
+ envelope.AppendChild(pkt)
+ if len(er.Controls) > 0 {
+ envelope.AppendChild(encodeControls(er.Controls))
+ }
+ return nil
+}
+
+// ExtendedResponse represents the response from the directory server
+// after sending an extended request
+// See: https://www.rfc-editor.org/rfc/rfc4511#section-4.12
+type ExtendedResponse struct {
+ // ExtendedResponse ::= [APPLICATION 24] SEQUENCE {
+ // COMPONENTS OF LDAPResult,
+ // responseName [10] LDAPOID OPTIONAL,
+ // responseValue [11] OCTET STRING OPTIONAL }
+
+ Name string
+ Value *ber.Packet
+ Controls []Control
+}
+
+// Extended performs an extended request. The resulting
+// ExtendedResponse may return a value in the form of a *ber.Packet
+func (l *Conn) Extended(er *ExtendedRequest) (*ExtendedResponse, error) {
+ msgCtx, err := l.doRequest(er)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+ if err = GetLDAPError(packet); err != nil {
+ return nil, err
+ }
+
+ if len(packet.Children[1].Children) < 4 {
+ return nil, fmt.Errorf(
+ "ldap: malformed extended response: expected 4 children, got %d",
+ len(packet.Children[1].Children),
+ )
+ }
+
+ response := &ExtendedResponse{
+ Name: packet.Children[1].Children[3].Data.String(),
+ Controls: make([]Control, 0),
+ }
+
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ decodedChild, decodeErr := DecodeControl(child)
+ if decodeErr != nil {
+ return nil, fmt.Errorf("failed to decode child control: %s", decodeErr)
+ }
+ response.Controls = append(response.Controls, decodedChild)
+ }
+ }
+
+ if len(packet.Children[1].Children) == 5 {
+ response.Value = packet.Children[1].Children[4]
+ }
+
+ return response, nil
+}
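+
+// Usage sketch (illustrative only; it assumes this package is imported as
+// "ldap", conn is an established *ldap.Conn, and the OID is the RFC 4532
+// "Who am I?" extended operation):
+//
+//	res, err := conn.Extended(ldap.NewExtendedRequest("1.3.6.1.4.1.4203.1.11.3", nil))
+//	if err == nil && res.Value != nil {
+//		fmt.Println(res.Value.Data.String()) // the authorization identity
+//	}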
diff --git a/vendor/github.com/go-ldap/ldap/v3/filter.go b/vendor/github.com/go-ldap/ldap/v3/filter.go
new file mode 100644
index 0000000..db76210
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/filter.go
@@ -0,0 +1,486 @@
+package ldap
+
+import (
+ "bytes"
+ hexpac "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// Filter choices
+const (
+ FilterAnd = 0
+ FilterOr = 1
+ FilterNot = 2
+ FilterEqualityMatch = 3
+ FilterSubstrings = 4
+ FilterGreaterOrEqual = 5
+ FilterLessOrEqual = 6
+ FilterPresent = 7
+ FilterApproxMatch = 8
+ FilterExtensibleMatch = 9
+)
+
+// FilterMap contains human readable descriptions of Filter choices
+var FilterMap = map[uint64]string{
+ FilterAnd: "And",
+ FilterOr: "Or",
+ FilterNot: "Not",
+ FilterEqualityMatch: "Equality Match",
+ FilterSubstrings: "Substrings",
+ FilterGreaterOrEqual: "Greater Or Equal",
+ FilterLessOrEqual: "Less Or Equal",
+ FilterPresent: "Present",
+ FilterApproxMatch: "Approx Match",
+ FilterExtensibleMatch: "Extensible Match",
+}
+
+// SubstringFilter options
+const (
+ FilterSubstringsInitial = 0
+ FilterSubstringsAny = 1
+ FilterSubstringsFinal = 2
+)
+
+// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
+var FilterSubstringsMap = map[uint64]string{
+ FilterSubstringsInitial: "Substrings Initial",
+ FilterSubstringsAny: "Substrings Any",
+ FilterSubstringsFinal: "Substrings Final",
+}
+
+// MatchingRuleAssertion choices
+const (
+ MatchingRuleAssertionMatchingRule = 1
+ MatchingRuleAssertionType = 2
+ MatchingRuleAssertionMatchValue = 3
+ MatchingRuleAssertionDNAttributes = 4
+)
+
+// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
+var MatchingRuleAssertionMap = map[uint64]string{
+ MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
+ MatchingRuleAssertionType: "Matching Rule Assertion Type",
+ MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
+ MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
+}
+
+var _SymbolAny = []byte{'*'}
+
+// CompileFilter converts a string representation of a filter into a BER-encoded packet
+func CompileFilter(filter string) (*ber.Packet, error) {
+ if len(filter) == 0 || filter[0] != '(' {
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
+ }
+ packet, pos, err := compileFilter(filter, 1)
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case pos > len(filter):
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ case pos < len(filter):
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
+ }
+ return packet, nil
+}
+
+// DecompileFilter converts a packet representation of a filter into a string representation
+func DecompileFilter(packet *ber.Packet) (_ string, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
+ }
+ }()
+
+ buf := bytes.NewBuffer(nil)
+ buf.WriteByte('(')
+ childStr := ""
+
+ switch packet.Tag {
+ case FilterAnd:
+ buf.WriteByte('&')
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ buf.WriteString(childStr)
+ }
+ case FilterOr:
+ buf.WriteByte('|')
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ buf.WriteString(childStr)
+ }
+ case FilterNot:
+ buf.WriteByte('!')
+ childStr, err = DecompileFilter(packet.Children[0])
+ if err != nil {
+ return
+ }
+ buf.WriteString(childStr)
+
+ case FilterSubstrings:
+ buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
+ buf.WriteByte('=')
+ for i, child := range packet.Children[1].Children {
+ if i == 0 && child.Tag != FilterSubstringsInitial {
+ buf.Write(_SymbolAny)
+ }
+ buf.WriteString(EscapeFilter(ber.DecodeString(child.Data.Bytes())))
+ if child.Tag != FilterSubstringsFinal {
+ buf.Write(_SymbolAny)
+ }
+ }
+ case FilterEqualityMatch:
+ buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
+ buf.WriteByte('=')
+ buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
+ case FilterGreaterOrEqual:
+ buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
+ buf.WriteString(">=")
+ buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
+ case FilterLessOrEqual:
+ buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
+ buf.WriteString("<=")
+ buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
+ case FilterPresent:
+ buf.WriteString(ber.DecodeString(packet.Data.Bytes()))
+ buf.WriteString("=*")
+ case FilterApproxMatch:
+ buf.WriteString(ber.DecodeString(packet.Children[0].Data.Bytes()))
+ buf.WriteString("~=")
+ buf.WriteString(EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())))
+ case FilterExtensibleMatch:
+ attr := ""
+ dnAttributes := false
+ matchingRule := ""
+ value := ""
+
+ for _, child := range packet.Children {
+ switch child.Tag {
+ case MatchingRuleAssertionMatchingRule:
+ matchingRule = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionType:
+ attr = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionMatchValue:
+ value = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionDNAttributes:
+ dnAttributes = child.Value.(bool)
+ }
+ }
+
+ if len(attr) > 0 {
+ buf.WriteString(attr)
+ }
+ if dnAttributes {
+ buf.WriteString(":dn")
+ }
+ if len(matchingRule) > 0 {
+ buf.WriteString(":")
+ buf.WriteString(matchingRule)
+ }
+ buf.WriteString(":=")
+ buf.WriteString(EscapeFilter(value))
+ }
+
+ buf.WriteByte(')')
+
+ return buf.String(), nil
+}
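+
+// Usage sketch (illustrative only; it assumes this package is imported as
+// "ldap" and shows a compile/decompile round trip):
+//
+//	pkt, err := ldap.CompileFilter("(&(objectClass=person)(uid=jdoe))")
+//	if err == nil {
+//		str, _ := ldap.DecompileFilter(pkt) // "(&(objectClass=person)(uid=jdoe))"
+//		_ = str
+//	}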
+
+func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
+ for pos < len(filter) && filter[pos] == '(' {
+ child, newPos, err := compileFilter(filter, pos+1)
+ if err != nil {
+ return pos, err
+ }
+ pos = newPos
+ parent.AppendChild(child)
+ }
+ if pos == len(filter) {
+ return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ }
+
+ return pos + 1, nil
+}
+
+func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
+ var (
+ packet *ber.Packet
+ err error
+ )
+
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
+ }
+ }()
+ newPos := pos
+
+ currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
+
+ switch currentRune {
+ case utf8.RuneError:
+ return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ case '(':
+ packet, newPos, err = compileFilter(filter, pos+currentWidth)
+ newPos++
+ return packet, newPos, err
+ case '&':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '|':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '!':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
+ var child *ber.Packet
+ child, newPos, err = compileFilter(filter, pos+currentWidth)
+ packet.AppendChild(child)
+ return packet, newPos, err
+ default:
+ const (
+ stateReadingAttr = 0
+ stateReadingExtensibleMatchingRule = 1
+ stateReadingCondition = 2
+ )
+
+ state := stateReadingAttr
+ attribute := bytes.NewBuffer(nil)
+ extensibleDNAttributes := false
+ extensibleMatchingRule := bytes.NewBuffer(nil)
+ condition := bytes.NewBuffer(nil)
+
+ for newPos < len(filter) {
+ remainingFilter := filter[newPos:]
+ currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
+ if currentRune == ')' {
+ break
+ }
+ if currentRune == utf8.RuneError {
+ return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ }
+
+ switch state {
+ case stateReadingAttr:
+ switch {
+ // Extensible rule, with only DN-matching
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingCondition
+ newPos += 5
+
+ // Extensible rule, with DN-matching and a matching OID
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingExtensibleMatchingRule
+ newPos += 4
+
+ // Extensible rule, with attr only
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Extensible rule, with no DN attribute matching
+ case currentRune == ':':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingExtensibleMatchingRule
+ newPos++
+
+ // Equality condition
+ case currentRune == '=':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
+ state = stateReadingCondition
+ newPos++
+
+ // Greater-than or equal
+ case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Less-than or equal
+ case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Approx
+ case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the attribute name
+ default:
+ attribute.WriteRune(currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingExtensibleMatchingRule:
+ switch {
+
+ // Matching rule OID is done
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the matching rule oid
+ default:
+ extensibleMatchingRule.WriteRune(currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingCondition:
+ // append to the condition
+ condition.WriteRune(currentRune)
+ newPos += currentWidth
+ }
+ }
+
+ if newPos == len(filter) {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ return packet, newPos, err
+ }
+ if packet == nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
+ return packet, newPos, err
+ }
+
+ switch {
+ case packet.Tag == FilterExtensibleMatch:
+ // MatchingRuleAssertion ::= SEQUENCE {
+ // matchingRule [1] MatchingRuleID OPTIONAL,
+ // type [2] AttributeDescription OPTIONAL,
+ // matchValue [3] AssertionValue,
+ // dnAttributes [4] BOOLEAN DEFAULT FALSE
+ // }
+
+ // Include the matching rule oid, if specified
+ if extensibleMatchingRule.Len() > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule.String(), MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
+ }
+
+ // Include the attribute, if specified
+ if attribute.Len() > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute.String(), MatchingRuleAssertionMap[MatchingRuleAssertionType]))
+ }
+
+ // Add the value (only required child)
+ encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
+
+ // Defaults to false, so only include in the sequence if true
+ if extensibleDNAttributes {
+ packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
+ }
+
+ case packet.Tag == FilterEqualityMatch && bytes.Equal(condition.Bytes(), _SymbolAny):
+ packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute.String(), FilterMap[FilterPresent])
+ case packet.Tag == FilterEqualityMatch && bytes.Contains(condition.Bytes(), _SymbolAny):
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
+ packet.Tag = FilterSubstrings
+ packet.Description = FilterMap[uint64(packet.Tag)]
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
+ parts := bytes.Split(condition.Bytes(), _SymbolAny)
+ for i, part := range parts {
+ if len(part) == 0 {
+ continue
+ }
+ var tag ber.Tag
+ switch i {
+ case 0:
+ tag = FilterSubstringsInitial
+ case len(parts) - 1:
+ tag = FilterSubstringsFinal
+ default:
+ tag = FilterSubstringsAny
+ }
+ encodedString, encodeErr := decodeEscapedSymbols(part)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
+ }
+ packet.AppendChild(seq)
+ default:
+ encodedString, encodeErr := decodeEscapedSymbols(condition.Bytes())
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute.String(), "Attribute"))
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
+ }
+
+ newPos += currentWidth
+ return packet, newPos, err
+ }
+}
+
+// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
+func decodeEscapedSymbols(src []byte) (string, error) {
+ var (
+ buffer bytes.Buffer
+ offset int
+ reader = bytes.NewReader(src)
+ byteHex []byte
+ byteVal []byte
+ )
+
+ for {
+ runeVal, runeSize, err := reader.ReadRune()
+ if err == io.EOF {
+ return buffer.String(), nil
+ } else if err != nil {
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: failed to read filter: %v", err))
+ } else if runeVal == unicode.ReplacementChar {
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", offset))
+ }
+
+ if runeVal == '\\' {
+ // http://tools.ietf.org/search/rfc4515
+ // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
+ // being a member of UTF1SUBSET.
+ if byteHex == nil {
+ byteHex = make([]byte, 2)
+ byteVal = make([]byte, 1)
+ }
+
+ if _, err := io.ReadFull(reader, byteHex); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
+ }
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
+ }
+
+ if _, err := hexpac.Decode(byteVal, byteHex); err != nil {
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: invalid characters for escape in filter: %v", err))
+ }
+
+ buffer.Write(byteVal)
+ } else {
+ buffer.WriteRune(runeVal)
+ }
+
+ offset += runeSize
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/ldap.go b/vendor/github.com/go-ldap/ldap/v3/ldap.go
new file mode 100644
index 0000000..802a529
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/ldap.go
@@ -0,0 +1,389 @@
+package ldap
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// LDAP Application Codes
+const (
+ ApplicationBindRequest = 0
+ ApplicationBindResponse = 1
+ ApplicationUnbindRequest = 2
+ ApplicationSearchRequest = 3
+ ApplicationSearchResultEntry = 4
+ ApplicationSearchResultDone = 5
+ ApplicationModifyRequest = 6
+ ApplicationModifyResponse = 7
+ ApplicationAddRequest = 8
+ ApplicationAddResponse = 9
+ ApplicationDelRequest = 10
+ ApplicationDelResponse = 11
+ ApplicationModifyDNRequest = 12
+ ApplicationModifyDNResponse = 13
+ ApplicationCompareRequest = 14
+ ApplicationCompareResponse = 15
+ ApplicationAbandonRequest = 16
+ ApplicationSearchResultReference = 19
+ ApplicationExtendedRequest = 23
+ ApplicationExtendedResponse = 24
+ ApplicationIntermediateResponse = 25
+)
+
+// ApplicationMap contains human readable descriptions of LDAP Application Codes
+var ApplicationMap = map[uint8]string{
+ ApplicationBindRequest: "Bind Request",
+ ApplicationBindResponse: "Bind Response",
+ ApplicationUnbindRequest: "Unbind Request",
+ ApplicationSearchRequest: "Search Request",
+ ApplicationSearchResultEntry: "Search Result Entry",
+ ApplicationSearchResultDone: "Search Result Done",
+ ApplicationModifyRequest: "Modify Request",
+ ApplicationModifyResponse: "Modify Response",
+ ApplicationAddRequest: "Add Request",
+ ApplicationAddResponse: "Add Response",
+ ApplicationDelRequest: "Del Request",
+ ApplicationDelResponse: "Del Response",
+ ApplicationModifyDNRequest: "Modify DN Request",
+ ApplicationModifyDNResponse: "Modify DN Response",
+ ApplicationCompareRequest: "Compare Request",
+ ApplicationCompareResponse: "Compare Response",
+ ApplicationAbandonRequest: "Abandon Request",
+ ApplicationSearchResultReference: "Search Result Reference",
+ ApplicationExtendedRequest: "Extended Request",
+ ApplicationExtendedResponse: "Extended Response",
+ ApplicationIntermediateResponse: "Intermediate Response",
+}
+
+// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
+const (
+ BeheraPasswordExpired = 0
+ BeheraAccountLocked = 1
+ BeheraChangeAfterReset = 2
+ BeheraPasswordModNotAllowed = 3
+ BeheraMustSupplyOldPassword = 4
+ BeheraInsufficientPasswordQuality = 5
+ BeheraPasswordTooShort = 6
+ BeheraPasswordTooYoung = 7
+ BeheraPasswordInHistory = 8
+)
+
+// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
+var BeheraPasswordPolicyErrorMap = map[int8]string{
+ BeheraPasswordExpired: "Password expired",
+ BeheraAccountLocked: "Account locked",
+ BeheraChangeAfterReset: "Password must be changed",
+ BeheraPasswordModNotAllowed: "Policy prevents password modification",
+ BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
+ BeheraInsufficientPasswordQuality: "Password fails quality checks",
+ BeheraPasswordTooShort: "Password is too short for policy",
+ BeheraPasswordTooYoung: "Password has been changed too recently",
+ BeheraPasswordInHistory: "New password is in list of old passwords",
+}
+
+var logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger allows clients to override the default logger
+func Logger(l *log.Logger) {
+ logger = l
+}
+
+// Adds descriptions to an LDAP Response packet for debugging
+func addLDAPDescriptions(packet *ber.Packet) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorDebugging, fmt.Errorf("ldap: cannot process packet to add descriptions: %s", r))
+ }
+ }()
+ packet.Description = "LDAP Response"
+ packet.Children[0].Description = "Message ID"
+
+ application := uint8(packet.Children[1].Tag)
+ packet.Children[1].Description = ApplicationMap[application]
+
+ switch application {
+ case ApplicationBindRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationBindResponse:
+ err = addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationUnbindRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationSearchRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationSearchResultEntry:
+ packet.Children[1].Children[0].Description = "Object Name"
+ packet.Children[1].Children[1].Description = "Attributes"
+ for _, child := range packet.Children[1].Children[1].Children {
+ child.Description = "Attribute"
+ child.Children[0].Description = "Attribute Name"
+ child.Children[1].Description = "Attribute Values"
+ for _, grandchild := range child.Children[1].Children {
+ grandchild.Description = "Attribute Value"
+ }
+ }
+ if len(packet.Children) == 3 {
+ err = addControlDescriptions(packet.Children[2])
+ }
+ case ApplicationSearchResultDone:
+ err = addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationModifyRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationModifyResponse:
+ case ApplicationAddRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationAddResponse:
+ case ApplicationDelRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationDelResponse:
+ case ApplicationModifyDNRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationModifyDNResponse:
+ case ApplicationCompareRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationCompareResponse:
+ case ApplicationAbandonRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationSearchResultReference:
+ case ApplicationExtendedRequest:
+ err = addRequestDescriptions(packet)
+ case ApplicationExtendedResponse:
+ }
+
+ return err
+}
+
+func addControlDescriptions(packet *ber.Packet) error {
+ packet.Description = "Controls"
+ for _, child := range packet.Children {
+ var value *ber.Packet
+ controlType := ""
+ child.Description = "Control"
+ switch len(child.Children) {
+ case 0:
+ // at least one child is required for control type
+ return fmt.Errorf("at least one child is required for control type")
+
+ case 1:
+ // just type, no criticality or value
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+
+ case 2:
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := child.Children[1].Value.(bool); ok {
+ child.Children[1].Description = "Criticality"
+ } else {
+ child.Children[1].Description = "Control Value"
+ value = child.Children[1]
+ }
+
+ case 3:
+ // criticality and value present
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+ child.Children[1].Description = "Criticality"
+ child.Children[2].Description = "Control Value"
+ value = child.Children[2]
+
+ default:
+ // more than 3 children is invalid
+ return fmt.Errorf("more than 3 children for control packet found")
+ }
+
+ if value == nil {
+ continue
+ }
+ switch controlType {
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ if value.Value != nil {
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ value.Data.Truncate(0)
+ value.Value = nil
+ valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
+ value.AppendChild(valueChildren)
+ }
+ value.Children[0].Description = "Real Search Control Value"
+ value.Children[0].Children[0].Description = "Paging Size"
+ value.Children[0].Children[1].Description = "Cookie"
+
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera Draft)"
+ if value.Value != nil {
+ valueChildren, err := ber.DecodePacketErr(value.Data.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ sequence := value.Children[0]
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ // Warning
+ warningPacket := child.Children[0]
+ val, err := ber.ParseInt64(warningPacket.Data.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to decode data bytes: %s", err)
+ }
+ if warningPacket.Tag == 0 {
+ // timeBeforeExpiration
+ value.Description += " (TimeBeforeExpiration)"
+ warningPacket.Value = val
+ } else if warningPacket.Tag == 1 {
+ // graceAuthNsRemaining
+ value.Description += " (GraceAuthNsRemaining)"
+ warningPacket.Value = val
+ }
+ } else if child.Tag == 1 {
+ // Error
+ bs := child.Data.Bytes()
+ if len(bs) != 1 || bs[0] > 8 {
+ return fmt.Errorf("failed to decode data bytes: %s", "invalid PasswordPolicyResponse enum value")
+ }
+ val := int8(bs[0])
+ child.Description = "Error"
+ child.Value = val
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func addRequestDescriptions(packet *ber.Packet) error {
+ packet.Description = "LDAP Request"
+ packet.Children[0].Description = "Message ID"
+ packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
+ if len(packet.Children) == 3 {
+ return addControlDescriptions(packet.Children[2])
+ }
+ return nil
+}
+
+func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error {
+ resultCode := uint16(LDAPResultSuccess)
+ matchedDN := ""
+ description := "Success"
+ if err := GetLDAPError(packet); err != nil {
+ resultCode = err.(*Error).ResultCode
+ matchedDN = err.(*Error).MatchedDN
+ description = "Error Message"
+ }
+
+ packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
+ packet.Children[1].Children[1].Description = "Matched DN (" + matchedDN + ")"
+ packet.Children[1].Children[2].Description = description
+ if len(packet.Children[1].Children) > 3 {
+ packet.Children[1].Children[3].Description = "Referral"
+ }
+ if len(packet.Children) == 3 {
+ return addControlDescriptions(packet.Children[2])
+ }
+ return nil
+}
+
+// DebugBinaryFile reads and prints packets from the given filename
+func DebugBinaryFile(fileName string) error {
+ file, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return NewError(ErrorDebugging, err)
+ }
+ ber.PrintBytes(os.Stdout, file, "")
+ packet, err := ber.DecodePacketErr(file)
+ if err != nil {
+ return fmt.Errorf("failed to decode packet: %s", err)
+ }
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+
+ return nil
+}
+
+func mustEscape(c byte) bool {
+ return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
+}
+
+// EscapeFilter escapes the special characters in the set `()*\`, and any
+// characters outside the range 0 < c < 0x80, in the provided LDAP filter
+// string, as defined in RFC4515.
+func EscapeFilter(filter string) string {
+ const hexValues = "0123456789abcdef"
+ escape := 0
+ for i := 0; i < len(filter); i++ {
+ if mustEscape(filter[i]) {
+ escape++
+ }
+ }
+ if escape == 0 {
+ return filter
+ }
+ buf := make([]byte, len(filter)+escape*2)
+ for i, j := 0, 0; i < len(filter); i++ {
+ c := filter[i]
+ if mustEscape(c) {
+ buf[j+0] = '\\'
+ buf[j+1] = hexValues[c>>4]
+ buf[j+2] = hexValues[c&0xf]
+ j += 3
+ } else {
+ buf[j] = c
+ j++
+ }
+ }
+ return string(buf)
+}
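+
+// Usage sketch (illustrative only; "userInput" is a hypothetical untrusted value):
+//
+//	filter := fmt.Sprintf("(uid=%s)", ldap.EscapeFilter(userInput))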
+
+// EscapeDN escapes distinguished names as described in RFC4514. Characters in the
+// set `"+,;<>\` are escaped by prepending a backslash, which is also done for trailing
+// spaces or a leading `#`. Null bytes are replaced with `\00`.
+func EscapeDN(dn string) string {
+ if dn == "" {
+ return ""
+ }
+
+ builder := strings.Builder{}
+
+ for i, r := range dn {
+ // Escape leading and trailing spaces
+ if (i == 0 || i == len(dn)-1) && r == ' ' {
+ builder.WriteRune('\\')
+ builder.WriteRune(r)
+ continue
+ }
+
+ // Escape leading '#'
+ if i == 0 && r == '#' {
+ builder.WriteRune('\\')
+ builder.WriteRune(r)
+ continue
+ }
+
+ // Escape characters as defined in RFC4514
+ switch r {
+ case '"', '+', ',', ';', '<', '>', '\\':
+ builder.WriteRune('\\')
+ builder.WriteRune(r)
+ case '\x00': // Null byte may not be escaped by a leading backslash
+ builder.WriteString("\\00")
+ default:
+ builder.WriteRune(r)
+ }
+ }
+
+ return builder.String()
+}
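+
+// Usage sketch (illustrative only; "cn" is a hypothetical untrusted value):
+//
+//	dn := fmt.Sprintf("cn=%s,ou=people,dc=example,dc=org", ldap.EscapeDN(cn))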
diff --git a/vendor/github.com/go-ldap/ldap/v3/moddn.go b/vendor/github.com/go-ldap/ldap/v3/moddn.go
new file mode 100644
index 0000000..84a6488
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/moddn.go
@@ -0,0 +1,102 @@
+package ldap
+
+import (
+ "fmt"
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// ModifyDNRequest holds the request to modify a DN
+type ModifyDNRequest struct {
+ DN string
+ NewRDN string
+ DeleteOldRDN bool
+ NewSuperior string
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+// NewModifyDNRequest creates a new request which can be passed to ModifyDN().
+//
+// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an
+// empty string for just changing the object's RDN.
+//
+// For moving the object without renaming, the "rdn" must be the first
+// RDN of the given DN.
+//
+// A call like
+//
+// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
+//
+// will set up the request to just rename uid=someone,dc=example,dc=org to
+// uid=newname,dc=example,dc=org.
+func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest {
+ return &ModifyDNRequest{
+ DN: dn,
+ NewRDN: rdn,
+ DeleteOldRDN: delOld,
+ NewSuperior: newSup,
+ }
+}
+
+// NewModifyDNWithControlsRequest creates a new request which can be passed to ModifyDN()
+// and also allows setting LDAP request controls.
+//
+// Refer to NewModifyDNRequest for the other parameters.
+func NewModifyDNWithControlsRequest(dn string, rdn string, delOld bool,
+ newSup string, controls []Control) *ModifyDNRequest {
+ return &ModifyDNRequest{
+ DN: dn,
+ NewRDN: rdn,
+ DeleteOldRDN: delOld,
+ NewSuperior: newSup,
+ Controls: controls,
+ }
+}
+
+func (req *ModifyDNRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request")
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.NewRDN, "New RDN"))
+ if req.DeleteOldRDN {
+ buf := []byte{0xff}
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, string(buf), "Delete old RDN"))
+ } else {
+ pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.DeleteOldRDN, "Delete old RDN"))
+ }
+ if req.NewSuperior != "" {
+ pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.NewSuperior, "New Superior"))
+ }
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// ModifyDN renames the given DN and optionally moves it to another base (when the "newSup" argument
+// to NewModifyDNRequest() is not "").
+func (l *Conn) ModifyDN(m *ModifyDNRequest) error {
+ msgCtx, err := l.doRequest(m)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return err
+ }
+
+ if packet.Children[1].Tag == ApplicationModifyDNResponse {
+ err := GetLDAPError(packet)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
+ }
+
+ return nil
+}
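+
+// Usage sketch (illustrative only; it assumes this package is imported as
+// "ldap" and conn is an established *ldap.Conn):
+//
+//	req := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
+//	if err := conn.ModifyDN(req); err != nil {
+//		// handle the error
+//	}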
diff --git a/vendor/github.com/go-ldap/ldap/v3/modify.go b/vendor/github.com/go-ldap/ldap/v3/modify.go
new file mode 100644
index 0000000..0e50136
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/modify.go
@@ -0,0 +1,181 @@
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// Change operation choices
+const (
+ AddAttribute = 0
+ DeleteAttribute = 1
+ ReplaceAttribute = 2
+ IncrementAttribute = 3 // (https://tools.ietf.org/html/rfc4525)
+)
+
+// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type PartialAttribute struct {
+ // Type is the type of the partial attribute
+ Type string
+ // Vals are the values of the partial attribute
+ Vals []string
+}
+
+func (p *PartialAttribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range p.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type Change struct {
+ // Operation is the type of change to be made
+ Operation uint
+ // Modification is the attribute to be modified
+ Modification PartialAttribute
+}
+
+func (c *Change) encode() *ber.Packet {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation"))
+ change.AppendChild(c.Modification.encode())
+ return change
+}
+
+// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type ModifyRequest struct {
+ // DN is the distinguishedName of the directory entry to modify
+ DN string
+ // Changes contain the attributes to modify
+ Changes []Change
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+// Add appends the given attribute to the list of changes to be made
+func (req *ModifyRequest) Add(attrType string, attrVals []string) {
+ req.appendChange(AddAttribute, attrType, attrVals)
+}
+
+// Delete appends the given attribute to the list of changes to be made
+func (req *ModifyRequest) Delete(attrType string, attrVals []string) {
+ req.appendChange(DeleteAttribute, attrType, attrVals)
+}
+
+// Replace appends the given attribute to the list of changes to be made
+func (req *ModifyRequest) Replace(attrType string, attrVals []string) {
+ req.appendChange(ReplaceAttribute, attrType, attrVals)
+}
+
+// Increment appends the given attribute to the list of changes to be made
+func (req *ModifyRequest) Increment(attrType string, attrVal string) {
+ req.appendChange(IncrementAttribute, attrType, []string{attrVal})
+}
+
+func (req *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) {
+ req.Changes = append(req.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}})
+}
+
+func (req *ModifyRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.DN, "DN"))
+ changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
+ for _, change := range req.Changes {
+ changes.AppendChild(change.encode())
+ }
+ pkt.AppendChild(changes)
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// NewModifyRequest creates a modify request for the given DN
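+//
+// A minimal usage sketch (conn is assumed to be an established *Conn; the DN and
+// attribute values below are placeholders):
+//
+// req := NewModifyRequest("uid=alice,ou=people,dc=example,dc=org", nil)
+// req.Replace("mail", []string{"alice@example.org"})
+// req.Add("description", []string{"example description"})
+// if err := conn.Modify(req); err != nil {
+// // ...
+// }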
+func NewModifyRequest(dn string, controls []Control) *ModifyRequest {
+ return &ModifyRequest{
+ DN: dn,
+ Controls: controls,
+ }
+}
+
+// Modify performs the ModifyRequest
+func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
+ msgCtx, err := l.doRequest(modifyRequest)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return err
+ }
+
+ if packet.Children[1].Tag == ApplicationModifyResponse {
+ err := GetLDAPError(packet)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("ldap: unexpected response: %d", packet.Children[1].Tag)
+ }
+
+ return nil
+}
+
+// ModifyResult holds the server's response to a modify request
+type ModifyResult struct {
+ // Controls are the returned controls
+ Controls []Control
+ // Referral is the returned referral
+ Referral string
+}
+
+// ModifyWithResult performs the ModifyRequest and returns the result
+func (l *Conn) ModifyWithResult(modifyRequest *ModifyRequest) (*ModifyResult, error) {
+ msgCtx, err := l.doRequest(modifyRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &ModifyResult{
+ Controls: make([]Control, 0),
+ }
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch packet.Children[1].Tag {
+ case ApplicationModifyResponse:
+ if err = GetLDAPError(packet); err != nil {
+ result.Referral = getReferral(err, packet)
+
+ return result, err
+ }
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ decodedChild, err := DecodeControl(child)
+ if err != nil {
+ return nil, errors.New("failed to decode child control: " + err.Error())
+ }
+ result.Controls = append(result.Controls, decodedChild)
+ }
+ }
+ }
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return result, nil
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go
new file mode 100644
index 0000000..72a2351
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/passwdmodify.go
@@ -0,0 +1,119 @@
+package ldap
+
+import (
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+const (
+ passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
+)
+
+// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
+type PasswordModifyRequest struct {
+ // UserIdentity is an optional string representation of the user associated with the request.
+ // This string may or may not be an LDAPDN [RFC2253].
+ // If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
+ UserIdentity string
+ // OldPassword, if present, contains the user's current password
+ OldPassword string
+ // NewPassword, if present, contains the desired password for this user
+ NewPassword string
+}
+
+// PasswordModifyResult holds the server response to a PasswordModifyRequest
+type PasswordModifyResult struct {
+ // GeneratedPassword holds a password generated by the server, if present
+ GeneratedPassword string
+ // Referral is the returned referral
+ Referral string
+}
+
+func (req *PasswordModifyRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
+ pkt.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
+
+ extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
+ passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
+ if req.UserIdentity != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, req.UserIdentity, "User Identity"))
+ }
+ if req.OldPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, req.OldPassword, "Old Password"))
+ }
+ if req.NewPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, req.NewPassword, "New Password"))
+ }
+ extendedRequestValue.AppendChild(passwordModifyRequestValue)
+
+ pkt.AppendChild(extendedRequestValue)
+
+ envelope.AppendChild(pkt)
+
+ return nil
+}
+
+// NewPasswordModifyRequest creates a new PasswordModifyRequest
+//
+// According to RFC 3062 (https://tools.ietf.org/html/rfc3062):
+// userIdentity is a string representing the user associated with the request.
+// This string may or may not be an LDAPDN (RFC 2253).
+// If userIdentity is empty then the operation will act on the user associated
+// with the session.
+//
+// oldPassword is the user's current password. It can be empty or it can be
+// required, depending on the session user's access rights (usually an administrator
+// can change a user's password without knowing the current one) and the
+// password policy (see the pwdSafeModify password policy attribute)
+//
+// newPassword is the desired new password for the user. If empty, the server can
+// return an error or generate a new password that will be available in
+// PasswordModifyResult.GeneratedPassword
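+//
+// A minimal usage sketch (conn is assumed to be an established, authenticated *Conn;
+// the identity and passwords below are placeholders):
+//
+// req := NewPasswordModifyRequest("uid=alice,ou=people,dc=example,dc=org", "oldSecret", "newSecret")
+// res, err := conn.PasswordModify(req)
+// if err != nil {
+// // ...
+// }
+// _ = res // res.GeneratedPassword is only set when the server generated a password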
+func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
+ return &PasswordModifyRequest{
+ UserIdentity: userIdentity,
+ OldPassword: oldPassword,
+ NewPassword: newPassword,
+ }
+}
+
+// PasswordModify performs the modification request
+func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
+ msgCtx, err := l.doRequest(passwordModifyRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &PasswordModifyResult{}
+
+ if packet.Children[1].Tag == ApplicationExtendedResponse {
+ if err = GetLDAPError(packet); err != nil {
+ result.Referral = getReferral(err, packet)
+
+ return result, err
+ }
+ } else {
+ return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag))
+ }
+
+ extendedResponse := packet.Children[1]
+ for _, child := range extendedResponse.Children {
+ if child.Tag == ber.TagEmbeddedPDV {
+ passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
+ if len(passwordModifyResponseValue.Children) == 1 {
+ if passwordModifyResponseValue.Children[0].Tag == ber.TagEOC {
+ result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/request.go b/vendor/github.com/go-ldap/ldap/v3/request.go
new file mode 100644
index 0000000..b64f232
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/request.go
@@ -0,0 +1,110 @@
+package ldap
+
+import (
+ "errors"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+var (
+ errRespChanClosed = errors.New("ldap: response channel closed")
+ errCouldNotRetMsg = errors.New("ldap: could not retrieve message")
+ // ErrNilConnection is returned if doRequest is called with a nil connection.
+ ErrNilConnection = errors.New("ldap: conn is nil, expected net.Conn")
+)
+
+type request interface {
+ appendTo(*ber.Packet) error
+}
+
+type requestFunc func(*ber.Packet) error
+
+func (f requestFunc) appendTo(p *ber.Packet) error {
+ return f(p)
+}
+
+func (l *Conn) doRequest(req request) (*messageContext, error) {
+ if l == nil || l.conn == nil {
+ return nil, ErrNilConnection
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ if err := req.appendTo(packet); err != nil {
+ return nil, err
+ }
+
+ if l.Debug {
+ l.Debug.PrintPacket(packet)
+ }
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return msgCtx, nil
+}
+
+func (l *Conn) readPacket(msgCtx *messageContext) (*ber.Packet, error) {
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errRespChanClosed)
+ }
+ packet, err := packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if packet == nil {
+ return nil, NewError(ErrorNetwork, errCouldNotRetMsg)
+ }
+
+ if l.Debug {
+ if err = addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ l.Debug.PrintPacket(packet)
+ }
+ return packet, nil
+}
+
+func getReferral(err error, packet *ber.Packet) (referral string) {
+ if !IsErrorWithCode(err, LDAPResultReferral) {
+ return ""
+ }
+
+ if len(packet.Children) < 2 {
+ return ""
+ }
+
+ // The packet Tag itself (of child 2) is generally a ber.TagObjectDescriptor with referrals; however, OpenLDAP
+ // seemingly returns a ber.Tag.GeneralizedTime. Every currently tested LDAP server which returns referrals does,
+ // however, return an ASN.1 BER packet with the Type of ber.TypeConstructed and Class of ber.ClassApplication.
+ // Thus this check expressly checks these fields instead.
+ //
+ // Related Issues:
+ // - https://github.com/authelia/authelia/issues/4199 (downstream)
+ if len(packet.Children[1].Children) == 0 || (packet.Children[1].TagType != ber.TypeConstructed || packet.Children[1].ClassType != ber.ClassApplication) {
+ return ""
+ }
+
+ var ok bool
+
+ for _, child := range packet.Children[1].Children {
+ // The referral URI itself should be contained within a child which has a Tag of ber.TagBitString or
+ // ber.TagPrintableString, and the Type of ber.TypeConstructed and the Class of ClassContext. As soon as any of
+ // these conditions is not true we can skip this child.
+ if (child.Tag != ber.TagBitString && child.Tag != ber.TagPrintableString) || child.TagType != ber.TypeConstructed || child.ClassType != ber.ClassContext {
+ continue
+ }
+
+ if referral, ok = child.Children[0].Value.(string); ok {
+ return referral
+ }
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/response.go b/vendor/github.com/go-ldap/ldap/v3/response.go
new file mode 100644
index 0000000..0eae100
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/response.go
@@ -0,0 +1,206 @@
+package ldap
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// Response defines an interface to get data from an LDAP server
+type Response interface {
+ Entry() *Entry
+ Referral() string
+ Controls() []Control
+ Err() error
+ Next() bool
+}
+
+type searchResponse struct {
+ conn *Conn
+ ch chan *SearchSingleResult
+
+ entry *Entry
+ referral string
+ controls []Control
+ err error
+}
+
+// Entry returns an entry from the given search request
+func (r *searchResponse) Entry() *Entry {
+ return r.entry
+}
+
+// Referral returns a referral from the given search request
+func (r *searchResponse) Referral() string {
+ return r.referral
+}
+
+// Controls returns controls from the given search request
+func (r *searchResponse) Controls() []Control {
+ return r.controls
+}
+
+// Err returns an error if the given search request failed
+func (r *searchResponse) Err() error {
+ return r.err
+}
+
+// Next reports whether another result is available
+func (r *searchResponse) Next() bool {
+ res, ok := <-r.ch
+ if !ok {
+ return false
+ }
+ if res == nil {
+ return false
+ }
+ r.err = res.Error
+ if r.err != nil {
+ return false
+ }
+ r.entry = res.Entry
+ r.referral = res.Referral
+ r.controls = res.Controls
+ return true
+}
+
+func (r *searchResponse) start(ctx context.Context, searchRequest *SearchRequest) {
+ go func() {
+ defer func() {
+ close(r.ch)
+ if err := recover(); err != nil {
+ r.conn.err = fmt.Errorf("ldap: recovered panic in searchResponse: %v", err)
+ }
+ }()
+
+ if r.conn.IsClosing() {
+ return
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, r.conn.nextMessageID(), "MessageID"))
+ // encode search request
+ err := searchRequest.appendTo(packet)
+ if err != nil {
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ r.conn.Debug.PrintPacket(packet)
+
+ msgCtx, err := r.conn.sendMessage(packet)
+ if err != nil {
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ defer r.conn.finishMessage(msgCtx)
+
+ foundSearchSingleResultDone := false
+ for !foundSearchSingleResultDone {
+ r.conn.Debug.Printf("%d: waiting for response", msgCtx.id)
+ select {
+ case <-ctx.Done():
+ r.conn.Debug.Printf("%d: %s", msgCtx.id, ctx.Err().Error())
+ return
+ case packetResponse, ok := <-msgCtx.responses:
+ if !ok {
+ err := NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ packet, err = packetResponse.ReadPacket()
+ r.conn.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+
+ if r.conn.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ ber.PrintPacket(packet)
+ }
+
+ switch packet.Children[1].Tag {
+ case ApplicationSearchResultEntry:
+ result := &SearchSingleResult{
+ Entry: &Entry{
+ DN: packet.Children[1].Children[0].Value.(string),
+ Attributes: unpackAttributes(packet.Children[1].Children[1].Children),
+ },
+ }
+ if len(packet.Children) != 3 {
+ r.ch <- result
+ continue
+ }
+ decoded, err := DecodeControl(packet.Children[2].Children[0])
+ if err != nil {
+ werr := fmt.Errorf("failed to decode search result entry: %w", err)
+ result.Error = werr
+ r.ch <- result
+ return
+ }
+ result.Controls = append(result.Controls, decoded)
+ r.ch <- result
+
+ case ApplicationSearchResultDone:
+ if err := GetLDAPError(packet); err != nil {
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ if len(packet.Children) == 3 {
+ result := &SearchSingleResult{}
+ for _, child := range packet.Children[2].Children {
+ decodedChild, err := DecodeControl(child)
+ if err != nil {
+ werr := fmt.Errorf("failed to decode child control: %w", err)
+ r.ch <- &SearchSingleResult{Error: werr}
+ return
+ }
+ result.Controls = append(result.Controls, decodedChild)
+ }
+ r.ch <- result
+ }
+ foundSearchSingleResultDone = true
+
+ case ApplicationSearchResultReference:
+ ref := packet.Children[1].Children[0].Value.(string)
+ r.ch <- &SearchSingleResult{Referral: ref}
+
+ case ApplicationIntermediateResponse:
+ decoded, err := DecodeControl(packet.Children[1])
+ if err != nil {
+ werr := fmt.Errorf("failed to decode intermediate response: %w", err)
+ r.ch <- &SearchSingleResult{Error: werr}
+ return
+ }
+ result := &SearchSingleResult{}
+ result.Controls = append(result.Controls, decoded)
+ r.ch <- result
+
+ default:
+ err := fmt.Errorf("unknown tag: %d", packet.Children[1].Tag)
+ r.ch <- &SearchSingleResult{Error: err}
+ return
+ }
+ }
+ }
+ r.conn.Debug.Printf("%d: returning", msgCtx.id)
+ }()
+}
+
+func newSearchResponse(conn *Conn, bufferSize int) *searchResponse {
+ var ch chan *SearchSingleResult
+ if bufferSize > 0 {
+ ch = make(chan *SearchSingleResult, bufferSize)
+ } else {
+ ch = make(chan *SearchSingleResult)
+ }
+ return &searchResponse{
+ conn: conn,
+ ch: ch,
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/search.go b/vendor/github.com/go-ldap/ldap/v3/search.go
new file mode 100644
index 0000000..62be105
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/search.go
@@ -0,0 +1,715 @@
+package ldap
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// scope choices
+const (
+ ScopeBaseObject = 0
+ ScopeSingleLevel = 1
+ ScopeWholeSubtree = 2
+ // ScopeChildren is an OpenLDAP extension that may not be supported by another directory server.
+ // See: https://github.com/openldap/openldap/blob/7c55484ee153047efd0e562fc1638c1a2525f320/include/ldap.h#L598
+ ScopeChildren = 3
+)
+
+// ScopeMap contains human readable descriptions of scope choices
+var ScopeMap = map[int]string{
+ ScopeBaseObject: "Base Object",
+ ScopeSingleLevel: "Single Level",
+ ScopeWholeSubtree: "Whole Subtree",
+ ScopeChildren: "Children",
+}
+
+// derefAliases
+const (
+ NeverDerefAliases = 0
+ DerefInSearching = 1
+ DerefFindingBaseObj = 2
+ DerefAlways = 3
+)
+
+// DerefMap contains human readable descriptions of derefAliases choices
+var DerefMap = map[int]string{
+ NeverDerefAliases: "NeverDerefAliases",
+ DerefInSearching: "DerefInSearching",
+ DerefFindingBaseObj: "DerefFindingBaseObj",
+ DerefAlways: "DerefAlways",
+}
+
+// ErrSizeLimitExceeded will be returned if the search result is exceeding the defined SizeLimit
+// and enforcing the requested limit is enabled in the search request (EnforceSizeLimit)
+var ErrSizeLimitExceeded = NewError(ErrorNetwork, errors.New("ldap: size limit exceeded"))
+
+// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
+// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the
+// same input map of attributes, the output entry will contain the same order of attributes
+func NewEntry(dn string, attributes map[string][]string) *Entry {
+ var attributeNames []string
+ for attributeName := range attributes {
+ attributeNames = append(attributeNames, attributeName)
+ }
+ sort.Strings(attributeNames)
+
+ var encodedAttributes []*EntryAttribute
+ for _, attributeName := range attributeNames {
+ encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
+ }
+ return &Entry{
+ DN: dn,
+ Attributes: encodedAttributes,
+ }
+}
+
+// Entry represents a single search result entry
+type Entry struct {
+ // DN is the distinguished name of the entry
+ DN string
+ // Attributes are the returned attributes for the entry
+ Attributes []*EntryAttribute
+}
+
+// GetAttributeValues returns the values for the named attribute, or an empty list
+func (e *Entry) GetAttributeValues(attribute string) []string {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.Values
+ }
+ }
+ return []string{}
+}
+
+// GetEqualFoldAttributeValues returns the values for the named attribute, or an
+// empty list. Attribute matching is done with strings.EqualFold.
+func (e *Entry) GetEqualFoldAttributeValues(attribute string) []string {
+ for _, attr := range e.Attributes {
+ if strings.EqualFold(attribute, attr.Name) {
+ return attr.Values
+ }
+ }
+ return []string{}
+}
+
+// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
+func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.ByteValues
+ }
+ }
+ return [][]byte{}
+}
+
+// GetEqualFoldRawAttributeValues returns the byte values for the named attribute, or an empty list
+func (e *Entry) GetEqualFoldRawAttributeValues(attribute string) [][]byte {
+ for _, attr := range e.Attributes {
+ if strings.EqualFold(attr.Name, attribute) {
+ return attr.ByteValues
+ }
+ }
+ return [][]byte{}
+}
+
+// GetAttributeValue returns the first value for the named attribute, or ""
+func (e *Entry) GetAttributeValue(attribute string) string {
+ values := e.GetAttributeValues(attribute)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+// GetEqualFoldAttributeValue returns the first value for the named attribute, or "".
+// Attribute comparison is done with strings.EqualFold.
+func (e *Entry) GetEqualFoldAttributeValue(attribute string) string {
+ values := e.GetEqualFoldAttributeValues(attribute)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
+func (e *Entry) GetRawAttributeValue(attribute string) []byte {
+ values := e.GetRawAttributeValues(attribute)
+ if len(values) == 0 {
+ return []byte{}
+ }
+ return values[0]
+}
+
+// GetEqualFoldRawAttributeValue returns the first value for the named attribute, or an empty slice
+func (e *Entry) GetEqualFoldRawAttributeValue(attribute string) []byte {
+ values := e.GetEqualFoldRawAttributeValues(attribute)
+ if len(values) == 0 {
+ return []byte{}
+ }
+ return values[0]
+}
+
+// Print outputs a human-readable description
+func (e *Entry) Print() {
+ fmt.Printf("DN: %s\n", e.DN)
+ for _, attr := range e.Attributes {
+ attr.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description indenting
+func (e *Entry) PrettyPrint(indent int) {
+ fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
+ for _, attr := range e.Attributes {
+ attr.PrettyPrint(indent + 2)
+ }
+}
+
+// Describe the tag to use for struct field tags
+const decoderTagName = "ldap"
+
+// readTag will read the reflect.StructField value for
+// the key defined in decoderTagName. If omitempty is
+// specified, the field may not be filled.
+func readTag(f reflect.StructField) (string, bool) {
+ val, ok := f.Tag.Lookup(decoderTagName)
+ if !ok {
+ return f.Name, false
+ }
+ opts := strings.Split(val, ",")
+ omit := false
+ if len(opts) == 2 {
+ omit = opts[1] == "omitempty"
+ }
+ return opts[0], omit
+}
+
+// Unmarshal parses the Entry in the value pointed to by i
+//
+// Currently, this method only supports struct fields of type
+// string, *string, []string, int, int64, []byte, *DN, []*DN or time.Time.
+// Other field types will be ignored. If the field type is a string or int but multiple
+// attribute values are returned, the first value will be used to fill the field.
+//
+// Example:
+//
+// type UserEntry struct {
+// // Fields with the tag key `dn` are automatically filled with the
+// // object's distinguishedName. This can be used multiple times.
+// DN string `ldap:"dn"`
+//
+// // This field will be filled with the attribute value for
+// // userPrincipalName. An attribute can be read into a struct field
+// // multiple times. Missing attributes will not result in an error.
+// UserPrincipalName string `ldap:"userPrincipalName"`
+//
+// // memberOf may have multiple values. If you don't
+// // know the number of attribute values at runtime, use a string slice.
+// MemberOf []string `ldap:"memberOf"`
+//
+// // ID is an integer value; unmarshaling will fail when the given
+// // attribute value cannot be parsed into an integer.
+// ID int `ldap:"id"`
+//
+// // LongID is similar to ID but uses an int64 instead.
+// LongID int64 `ldap:"longId"`
+//
+// // Data is similar to MemberOf: a slice containing all attribute
+// // values.
+// Data []byte `ldap:"data"`
+//
+// // Time is parsed with the generalizedTime spec into a time.Time
+// Created time.Time `ldap:"createdTimestamp"`
+//
+// // *DN is parsed with the ParseDN
+// Owner *ldap.DN `ldap:"owner"`
+//
+// // []*DN is parsed with the ParseDN
+// Children []*ldap.DN `ldap:"children"`
+//
+// // This won't work, as the field is not of type string. For this
+// // to work, you'll have to temporarily store the result in string
+// // (or string array) and convert it to the desired type afterwards.
+// UserAccountControl uint32 `ldap:"userPrincipalName"`
+// }
+// user := UserEntry{}
+//
+// if err := result.Unmarshal(&user); err != nil {
+// // ...
+// }
+func (e *Entry) Unmarshal(i interface{}) (err error) {
+ // Make sure it's a ptr
+ if vo := reflect.ValueOf(i).Kind(); vo != reflect.Ptr {
+ return fmt.Errorf("ldap: cannot use %s, expected pointer to a struct", vo)
+ }
+
+ sv, st := reflect.ValueOf(i).Elem(), reflect.TypeOf(i).Elem()
+ // Make sure it's pointing to a struct
+ if sv.Kind() != reflect.Struct {
+ return fmt.Errorf("ldap: expected pointer to a struct, got %s", sv.Kind())
+ }
+
+ for n := 0; n < st.NumField(); n++ {
+ // Holds struct field value and type
+ fv, ft := sv.Field(n), st.Field(n)
+
+ // skip unexported fields
+ if ft.PkgPath != "" {
+ continue
+ }
+
+ // omitempty can be safely discarded, as it's not needed when unmarshalling
+ fieldTag, _ := readTag(ft)
+
+ // Fill the field with the distinguishedName if the tag key is `dn`
+ if fieldTag == "dn" {
+ fv.SetString(e.DN)
+ continue
+ }
+
+ values := e.GetAttributeValues(fieldTag)
+ if len(values) == 0 {
+ continue
+ }
+
+ switch fv.Interface().(type) {
+ case []string:
+ for _, item := range values {
+ fv.Set(reflect.Append(fv, reflect.ValueOf(item)))
+ }
+ case string:
+ fv.SetString(values[0])
+ case *string:
+ fv.Set(reflect.ValueOf(&values[0]))
+ case []byte:
+ fv.SetBytes([]byte(values[0]))
+ case int, int64:
+ intVal, err := strconv.ParseInt(values[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("ldap: could not parse value '%s' into int field", values[0])
+ }
+ fv.SetInt(intVal)
+ case time.Time:
+ t, err := ber.ParseGeneralizedTime([]byte(values[0]))
+ if err != nil {
+ return fmt.Errorf("ldap: could not parse value '%s' into time.Time field", values[0])
+ }
+ fv.Set(reflect.ValueOf(t))
+ case *DN:
+ dn, err := ParseDN(values[0])
+ if err != nil {
+ return fmt.Errorf("ldap: could not parse value '%s' into *ldap.DN field", values[0])
+ }
+ fv.Set(reflect.ValueOf(dn))
+ case []*DN:
+ for _, item := range values {
+ dn, err := ParseDN(item)
+ if err != nil {
+ return fmt.Errorf("ldap: could not parse value '%s' into *ldap.DN field", item)
+ }
+ fv.Set(reflect.Append(fv, reflect.ValueOf(dn)))
+ }
+ default:
+ return fmt.Errorf("ldap: expected field to be of type string, *string, []string, int, int64, []byte, *DN, []*DN or time.Time, got %v", ft.Type)
+ }
+ }
+ return
+}
+
+// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
+func NewEntryAttribute(name string, values []string) *EntryAttribute {
+ var bytes [][]byte
+ for _, value := range values {
+ bytes = append(bytes, []byte(value))
+ }
+ return &EntryAttribute{
+ Name: name,
+ Values: values,
+ ByteValues: bytes,
+ }
+}
+
+// EntryAttribute holds a single attribute
+type EntryAttribute struct {
+ // Name is the name of the attribute
+ Name string
+ // Values contain the string values of the attribute
+ Values []string
+ // ByteValues contain the raw values of the attribute
+ ByteValues [][]byte
+}
+
+// Print outputs a human-readable description
+func (e *EntryAttribute) Print() {
+ fmt.Printf("%s: %s\n", e.Name, e.Values)
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *EntryAttribute) PrettyPrint(indent int) {
+ fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
+}
+
+// SearchResult holds the server's response to a search request
+type SearchResult struct {
+ // Entries are the returned entries
+ Entries []*Entry
+ // Referrals are the returned referrals
+ Referrals []string
+ // Controls are the returned controls
+ Controls []Control
+}
+
+// Print outputs a human-readable description
+func (s *SearchResult) Print() {
+ for _, entry := range s.Entries {
+ entry.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (s *SearchResult) PrettyPrint(indent int) {
+ for _, entry := range s.Entries {
+ entry.PrettyPrint(indent)
+ }
+}
+
+// appendTo appends all entries of `s` to `r`
+func (s *SearchResult) appendTo(r *SearchResult) {
+ r.Entries = append(r.Entries, s.Entries...)
+ r.Referrals = append(r.Referrals, s.Referrals...)
+ r.Controls = append(r.Controls, s.Controls...)
+}
+
+// SearchSingleResult holds the server's single entry response to a search request
+type SearchSingleResult struct {
+ // Entry is the returned entry
+ Entry *Entry
+ // Referral is the returned referral
+ Referral string
+ // Controls are the returned controls
+ Controls []Control
+ // Error is set when the search request fails
+ Error error
+}
+
+// Print outputs a human-readable description
+func (s *SearchSingleResult) Print() {
+ s.Entry.Print()
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (s *SearchSingleResult) PrettyPrint(indent int) {
+ s.Entry.PrettyPrint(indent)
+}
+
+// SearchRequest represents a search request to send to the server
+type SearchRequest struct {
+ BaseDN string
+ Scope int
+ DerefAliases int
+ SizeLimit int
+ TimeLimit int
+ TypesOnly bool
+ Filter string
+ Attributes []string
+ Controls []Control
+
+ // EnforceSizeLimit will hard limit the maximum number of entries parsed, in case the directory
+ // server returns more results than requested. This setting is disabled by default and does not
+ // work in async search requests.
+ EnforceSizeLimit bool
+}
+
+func (req *SearchRequest) appendTo(envelope *ber.Packet) error {
+ pkt := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
+ pkt.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, req.BaseDN, "Base DN"))
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.Scope), "Scope"))
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(req.DerefAliases), "Deref Aliases"))
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.SizeLimit), "Size Limit"))
+ pkt.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(req.TimeLimit), "Time Limit"))
+ pkt.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, req.TypesOnly, "Types Only"))
+ // compile and encode filter
+ filterPacket, err := CompileFilter(req.Filter)
+ if err != nil {
+ return err
+ }
+ pkt.AppendChild(filterPacket)
+ // encode attributes
+ attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range req.Attributes {
+ attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ }
+ pkt.AppendChild(attributesPacket)
+
+ envelope.AppendChild(pkt)
+ if len(req.Controls) > 0 {
+ envelope.AppendChild(encodeControls(req.Controls))
+ }
+
+ return nil
+}
+
+// NewSearchRequest creates a new search request
+func NewSearchRequest(
+ BaseDN string,
+ Scope, DerefAliases, SizeLimit, TimeLimit int,
+ TypesOnly bool,
+ Filter string,
+ Attributes []string,
+ Controls []Control,
+) *SearchRequest {
+ return &SearchRequest{
+ BaseDN: BaseDN,
+ Scope: Scope,
+ DerefAliases: DerefAliases,
+ SizeLimit: SizeLimit,
+ TimeLimit: TimeLimit,
+ TypesOnly: TypesOnly,
+ Filter: Filter,
+ Attributes: Attributes,
+ Controls: Controls,
+ }
+}
+
+// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
+// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
+// The following four cases are possible given the arguments:
+// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
+// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
+//
+// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
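+//
+// A minimal usage sketch (conn is assumed to be an established *Conn; the base DN
+// and filter below are placeholders):
+//
+// req := NewSearchRequest(
+// "dc=example,dc=org",
+// ScopeWholeSubtree, NeverDerefAliases, 0, 0, false,
+// "(objectClass=person)",
+// []string{"dn", "cn"},
+// nil,
+// )
+// res, err := conn.SearchWithPaging(req, 100)
+// if err != nil {
+// // ...
+// }
+// fmt.Printf("got %d entries\n", len(res.Entries))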
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
+ var pagingControl *ControlPaging
+
+ control := FindControl(searchRequest.Controls, ControlTypePaging)
+ if control == nil {
+ pagingControl = NewControlPaging(pagingSize)
+ searchRequest.Controls = append(searchRequest.Controls, pagingControl)
+ } else {
+ castControl, ok := control.(*ControlPaging)
+ if !ok {
+ return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control)
+ }
+ if castControl.PagingSize != pagingSize {
+ return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
+ }
+ pagingControl = castControl
+ }
+
+ searchResult := new(SearchResult)
+ for {
+ result, err := l.Search(searchRequest)
+ if result != nil {
+ result.appendTo(searchResult)
+ } else {
+ if err == nil {
+ // Guard against the odd case where there is no result packet, but also no error.
+ return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
+ }
+ }
+ if err != nil {
+ // If an error occurred, all results that have been received so far will be returned
+ return searchResult, err
+ }
+
+ l.Debug.Printf("Looking for Paging Control...")
+ pagingResult := FindControl(result.Controls, ControlTypePaging)
+ if pagingResult == nil {
+ pagingControl = nil
+ l.Debug.Printf("Could not find paging control. Breaking...")
+ break
+ }
+
+ cookie := pagingResult.(*ControlPaging).Cookie
+ if len(cookie) == 0 {
+ pagingControl = nil
+ l.Debug.Printf("Could not find cookie. Breaking...")
+ break
+ }
+ pagingControl.SetCookie(cookie)
+ }
+
+ if pagingControl != nil {
+ l.Debug.Printf("Abandoning Paging...")
+ pagingControl.PagingSize = 0
+ if _, err := l.Search(searchRequest); err != nil {
+ return searchResult, err
+ }
+ }
+
+ return searchResult, nil
+}
+
+// Search performs the given search request
+func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
+ msgCtx, err := l.doRequest(searchRequest)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &SearchResult{
+ Entries: make([]*Entry, 0),
+ Referrals: make([]string, 0),
+ Controls: make([]Control, 0),
+ }
+
+ for {
+ packet, err := l.readPacket(msgCtx)
+ if err != nil {
+ return result, err
+ }
+
+ switch packet.Children[1].Tag {
+ case 4:
+ if searchRequest.EnforceSizeLimit &&
+ searchRequest.SizeLimit > 0 &&
+ len(result.Entries) >= searchRequest.SizeLimit {
+ return result, ErrSizeLimitExceeded
+ }
+
+ attr := make([]*ber.Packet, 0)
+ if len(packet.Children[1].Children) > 1 {
+ attr = packet.Children[1].Children[1].Children
+ }
+ entry := &Entry{
+ DN: packet.Children[1].Children[0].Value.(string),
+ Attributes: unpackAttributes(attr),
+ }
+ result.Entries = append(result.Entries, entry)
+ case 5:
+ err := GetLDAPError(packet)
+ if err != nil {
+ return result, err
+ }
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ decodedChild, err := DecodeControl(child)
+ if err != nil {
+ return result, fmt.Errorf("failed to decode child control: %s", err)
+ }
+ result.Controls = append(result.Controls, decodedChild)
+ }
+ }
+ return result, nil
+ case 19:
+ result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
+ }
+ }
+}
+
+// SearchAsync performs a search request and returns all search results asynchronously.
+// This means you get all results until an error happens (or the search successfully finished),
+// e.g. for size / time limited requests all are received until the limit is reached.
+// To stop the search, call the cancel function of the context.
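+//
+// A minimal usage sketch of consuming the asynchronous results (conn and
+// searchRequest are assumed to already exist):
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// r := conn.SearchAsync(ctx, searchRequest, 64)
+// for r.Next() {
+// if entry := r.Entry(); entry != nil {
+// fmt.Println(entry.DN)
+// }
+// }
+// if err := r.Err(); err != nil {
+// // ...
+// }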
+func (l *Conn) SearchAsync(
+ ctx context.Context, searchRequest *SearchRequest, bufferSize int) Response {
+ r := newSearchResponse(l, bufferSize)
+ r.start(ctx, searchRequest)
+ return r
+}
+
+// Syncrepl is a short name for the LDAP Sync Replication engine that works on the
+// consumer side. This can perform a persistent search and returns an entry
+// when the entry is updated on the server side.
+// To stop the search, call the cancel function of the context.
+func (l *Conn) Syncrepl(
+ ctx context.Context, searchRequest *SearchRequest, bufferSize int,
+ mode ControlSyncRequestMode, cookie []byte, reloadHint bool,
+) Response {
+ control := NewControlSyncRequest(mode, cookie, reloadHint)
+ searchRequest.Controls = append(searchRequest.Controls, control)
+ r := newSearchResponse(l, bufferSize)
+ r.start(ctx, searchRequest)
+ return r
+}
+
+// unpackAttributes will extract all given LDAP attributes and its values
+// from the ber.Packet
+func unpackAttributes(children []*ber.Packet) []*EntryAttribute {
+ entries := make([]*EntryAttribute, len(children))
+ for i, child := range children {
+ length := len(child.Children[1].Children)
+ entry := &EntryAttribute{
+ Name: child.Children[0].Value.(string),
+ // pre-allocate the slice since we can determine
+ // the number of attributes at this point
+ Values: make([]string, length),
+ ByteValues: make([][]byte, length),
+ }
+
+ for i, value := range child.Children[1].Children {
+ entry.ByteValues[i] = value.ByteValue
+ entry.Values[i] = value.Value.(string)
+ }
+ entries[i] = entry
+ }
+
+ return entries
+}
+
+// DirSync does a Search with dirSync Control.
+func (l *Conn) DirSync(
+ searchRequest *SearchRequest, flags int64, maxAttrCount int64, cookie []byte,
+) (*SearchResult, error) {
+ control := FindControl(searchRequest.Controls, ControlTypeDirSync)
+ if control == nil {
+ c := NewRequestControlDirSync(flags, maxAttrCount, cookie)
+ searchRequest.Controls = append(searchRequest.Controls, c)
+ } else {
+ c := control.(*ControlDirSync)
+ if c.Flags != flags {
+ return nil, fmt.Errorf("flags given in search request (%d) conflicts with flags given in search call (%d)", c.Flags, flags)
+ }
+ if c.MaxAttrCount != maxAttrCount {
+ return nil, fmt.Errorf("MaxAttrCnt given in search request (%d) conflicts with maxAttrCount given in search call (%d)", c.MaxAttrCount, maxAttrCount)
+ }
+ }
+ searchResult, err := l.Search(searchRequest)
+ l.Debug.Printf("Looking for result...")
+ if err != nil {
+ return nil, err
+ }
+ if searchResult == nil {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
+ }
+
+ l.Debug.Printf("Looking for DirSync Control...")
+ resultControl := FindControl(searchResult.Controls, ControlTypeDirSync)
+ if resultControl == nil {
+ l.Debug.Printf("Could not find dirSyncControl control. Breaking...")
+ return searchResult, nil
+ }
+
+ cookie = resultControl.(*ControlDirSync).Cookie
+ if len(cookie) == 0 {
+ l.Debug.Printf("Could not find cookie. Breaking...")
+ return searchResult, nil
+ }
+
+ return searchResult, nil
+}
+
+// DirSyncAsync performs a search request and returns all search results
+// asynchronously. This is efficient when the server returns lots of entries.
+func (l *Conn) DirSyncAsync(
+ ctx context.Context, searchRequest *SearchRequest, bufferSize int,
+ flags, maxAttrCount int64, cookie []byte,
+) Response {
+ control := NewRequestControlDirSync(flags, maxAttrCount, cookie)
+ searchRequest.Controls = append(searchRequest.Controls, control)
+ r := newSearchResponse(l, bufferSize)
+ r.start(ctx, searchRequest)
+ return r
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/unbind.go b/vendor/github.com/go-ldap/ldap/v3/unbind.go
new file mode 100644
index 0000000..10cf75c
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/unbind.go
@@ -0,0 +1,38 @@
+package ldap
+
+import (
+ "errors"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+// ErrConnUnbound is returned when Unbind is called on an already closing connection.
+var ErrConnUnbound = NewError(ErrorNetwork, errors.New("ldap: connection is closed"))
+
+type unbindRequest struct{}
+
+func (unbindRequest) appendTo(envelope *ber.Packet) error {
+ envelope.AppendChild(ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationUnbindRequest, nil, ApplicationMap[ApplicationUnbindRequest]))
+ return nil
+}
+
+// Unbind will perform an unbind request. The Unbind operation
+// should be thought of as the "quit" operation.
+// See https://datatracker.ietf.org/doc/html/rfc4511#section-4.3
+func (l *Conn) Unbind() error {
+ if l.IsClosing() {
+ return ErrConnUnbound
+ }
+
+ _, err := l.doRequest(unbindRequest{})
+ if err != nil {
+ return err
+ }
+
+ // Sending an unbindRequest will make the connection unusable.
+ // Pending requests will fail with:
+ // LDAP Result Code 200 "Network Error": ldap: response channel closed
+ l.Close()
+
+ return nil
+}
diff --git a/vendor/github.com/go-ldap/ldap/v3/whoami.go b/vendor/github.com/go-ldap/ldap/v3/whoami.go
new file mode 100644
index 0000000..10c523d
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/v3/whoami.go
@@ -0,0 +1,91 @@
+package ldap
+
+// This file contains the "Who Am I?" extended operation as specified in RFC 4532
+//
+// https://tools.ietf.org/html/rfc4532
+
+import (
+ "errors"
+ "fmt"
+
+ ber "github.com/go-asn1-ber/asn1-ber"
+)
+
+type whoAmIRequest bool
+
+// WhoAmIResult is returned by the WhoAmI() call
+type WhoAmIResult struct {
+ AuthzID string
+}
+
+func (r whoAmIRequest) encode() (*ber.Packet, error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Who Am I? Extended Operation")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, ControlTypeWhoAmI, "Extended Request Name: Who Am I? OID"))
+ return request, nil
+}
+
+// WhoAmI returns the authzID the server thinks we are. You may pass controls,
+// such as a Proxied Authorization control.
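+//
+// A minimal usage sketch (conn is assumed to be an established, bound *Conn):
+//
+// res, err := conn.WhoAmI(nil)
+// if err != nil {
+// // ...
+// }
+// fmt.Println("authzID:", res.AuthzID)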
+func (l *Conn) WhoAmI(controls []Control) (*WhoAmIResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ req := whoAmIRequest(true)
+ encodedWhoAmIRequest, err := req.encode()
+ if err != nil {
+ return nil, err
+ }
+ packet.AppendChild(encodedWhoAmIRequest)
+
+ if len(controls) != 0 {
+ packet.AppendChild(encodeControls(controls))
+ }
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &WhoAmIResult{}
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if packet == nil {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationExtendedResponse {
+ if err := GetLDAPError(packet); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
+ }
+
+ extendedResponse := packet.Children[1]
+ for _, child := range extendedResponse.Children {
+ if child.Tag == 11 {
+ result.AuthzID = ber.DecodeString(child.Data.Bytes())
+ }
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
new file mode 100644
index 0000000..09573e0
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
new file mode 100644
index 0000000..35dbc25
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
new file mode 100644
index 0000000..ff9c57e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -0,0 +1,195 @@
+# Migration Guide (v5.0.0)
+
+Version `v5` contains a major rework of core functionalities in the `jwt-go`
+library. This includes support for several validation options as well as a
+re-design of the `Claims` interface. Lastly, we reworked how errors work under
+the hood, which should provide a better overall developer experience.
+
+Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v5"
+
+For most users, changing the import path *should* suffice. However, since we
+intentionally changed and cleaned some of the public API, existing programs
+might need to be updated. The following sections describe significant changes
+and corresponding updates for existing programs.
+
+## Parsing and Validation Options
+
+Under the hood, a new `Validator` struct takes care of validating the claims. A
+long awaited feature has been the option to fine-tune the validation of tokens.
+This is now possible with several `ParserOption` functions that can be appended
+to most `Parse` functions, such as `ParseWithClaims`. The most important options
+and changes are listed below, followed by a short usage sketch:
+ * Added `WithLeeway` to support specifying the leeway that is allowed when
+ validating time-based claims, such as `exp` or `nbf`.
+ * Changed default behavior to not check the `iat` claim. Usage of this claim
+ is OPTIONAL according to the JWT RFC. The claim itself is also purely
+ informational according to the RFC, so a strict validation failure is not
+ recommended. If you want to check for sensible values in these claims,
+ please use the `WithIssuedAt` parser option.
+ * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
+ expected `aud`, `sub` and `iss`.
+ * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
+ previously global settings to enable base64 strict encoding and the parsing
+ of base64 strings with padding. The latter is strictly speaking against the
+ standard, but unfortunately some of the major identity providers issue some
+ of these incorrect tokens. Both options are disabled by default.
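+
+As a sketch of how these options can be combined (the token string, key, and
+expected issuer below are placeholders, not values required by the library):
+
+```go
+token, err := jwt.ParseWithClaims(
+ tokenString,
+ &jwt.RegisteredClaims{},
+ func(t *jwt.Token) (interface{}, error) { return []byte("my-secret"), nil },
+ jwt.WithLeeway(5*time.Second),
+ jwt.WithIssuer("https://issuer.example.org"),
+ jwt.WithIssuedAt(),
+)
+if err != nil {
+ log.Fatal(err)
+}
+fmt.Println(token.Valid)
+```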
+
+## Changes to the `Claims` interface
+
+### Complete Restructuring
+
+Previously, the claims interface was satisfied with an implementation of a
+`Valid() error` function. This had several issues:
+ * The different claim types (struct claims, map claims, etc.) contained
+ similar (but not 100 % identical) code for how this validation was done. This
+ led to a lot of (almost) duplicate code that was hard to maintain.
+ * It was not really semantically close to what a "claim" (or a set of claims)
+ really is, which is a list of defined key/value pairs with a certain
+ semantic meaning.
+
+Since all the validation functionality is now extracted into the validator, all
+`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
+Instead, the interface now represents a list of getters to retrieve values with
+a specific meaning. This allows us to completely decouple the validation logic
+from the underlying storage representation of the claim, which could be a
+struct, a map or even something stored in a database.
+
+```go
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
+```
+
+Users that previously directly called the `Valid` function on their claims,
+e.g., to perform validation independently of parsing/verifying a token, can now
+use the `jwt.NewValidator` function to create a `Validator` independently of the
+`Parser`.
+
+```go
+var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
+v.Validate(myClaims)
+```
+
+### Supported Claim Types and Removal of `StandardClaims`
+
+The two standard claim types supported by this library, `MapClaims` and
+`RegisteredClaims` both implement the necessary functions of this interface. The
+old `StandardClaims` struct, which has already been deprecated in `v4` is now
+removed.
+
+Users using custom claims, in most cases, will not experience any changes in
+behavior as long as they embed `RegisteredClaims`. If they created a new
+claim type from scratch, they now need to implement the proper getter
+functions.
+
+### Migrating Application Specific Logic of the old `Valid`
+
+Previously, users could override the `Valid` method in a custom claim, for
+example to extend the validation with application-specific claims. However, this
+was always very dangerous, since one could easily disable the standard
+validation and signature checking.
+
+In order to avoid that, while still supporting the use-case, a new
+`ClaimsValidator` interface has been introduced. This interface consists of the
+`Validate() error` function. If the validator sees that a `Claims` struct
+implements this interface, the errors returned from the `Validate` function will
+be *appended* to the regular standard validation. It is no longer possible to
+disable the standard validation (not even by accident).
+
+Usage examples can be found in [example_test.go](./example_test.go), which shows
+how to build claims structs like the following.
+
+```go
+// MyCustomClaims includes all registered claims, plus Foo.
+type MyCustomClaims struct {
+ Foo string `json:"foo"`
+ jwt.RegisteredClaims
+}
+
+// Validate can be used to execute additional application-specific claims
+// validation.
+func (m MyCustomClaims) Validate() error {
+ if m.Foo != "bar" {
+ return errors.New("must be foobar")
+ }
+
+ return nil
+}
+```
+
+## Changes to the `Token` and `Parser` struct
+
+The previously global functions `DecodeSegment` and `EncodeSegment` were moved
+to the `Parser` and `Token` struct respectively. This will allow us in the
+future to configure the behavior of these two based on options supplied on the
+parser or the token (creation). This also removes two previously global
+variables and moves them to parser options `WithStrictDecoding` and
+`WithPaddingAllowed`.
+
+In order to do that, we had to adjust the way signing methods work. Previously
+they were given a base64 encoded signature in `Verify` and were expected to
+return a base64 encoded version of the signature in `Sign`, both as a `string`.
+However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
+global and was a less than perfect design because we were repeating
+encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
+operate on a decoded signature as a `[]byte`, which feels more natural for a
+cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
+the final encoding/decoding part.
+
+In addition to that, we also changed the `Signature` field on `Token` from a
+`string` to `[]byte` and this is also now populated with the decoded form. This
+is also more consistent, because the other parts of the JWT, mainly `Header` and
+`Claims` were already stored in decoded form in `Token`. Only the signature was
+stored in base64 encoded form, which was redundant with the information in the
+`Raw` field, which contains the complete token as base64.
+
+```go
+type Token struct {
+ Raw string // Raw contains the raw token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form
+ Valid bool // Valid specifies if the token is valid
+}
+```
+
+Most (if not all) of these changes should not impact the normal usage of this
+library. Only users directly accessing the `Signature` field as well as
+developers of custom signing methods should be affected.
+
+# Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
+this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
+be a drop-in replacement, if you're having troubles migrating, please open an
+issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+# Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at
+https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
new file mode 100644
index 0000000..0bb636f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -0,0 +1,167 @@
+# jwt-go
+
+[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+[](https://coveralls.io/github/golang-jwt/jwt?branch=main)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
+implementation of [JSON Web
+Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
+this project adds Go module support, but maintains backward compatibility with
+older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
+[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
+v5.0.0 introduces major improvements to the validation of tokens, but is not
+entirely backward compatible.
+
+> After the original author of the library suggested migrating the maintenance
+> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
+> existing library into this repository. See
+> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
+> detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the
+crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15. See issue
+[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
+detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is
+what you
+expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
+This library attempts to make it easy to do the right thing by requiring key
+types to match the expected alg, but you should take the extra step to verify it in
+your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release
+policy](https://golang.org/doc/devel/release#policy). So we will support a major
+version of Go until there are two newer major releases. We no longer support
+building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities that will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
+Tokens.
+
+In short, it's a signed JSON object that does something useful (for example,
+authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is
+made of three parts, separated by `.`'s. The first two parts are JSON objects
+that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
+encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for
+verifying the last part, the signature. For example, which signing algorithm
+was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and
+contains the actual stuff you care about. Refer to [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
+reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation
+and signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA,
+RSA-PSS, ECDSA, and EdDSA (Ed25519), though hooks are present for adding your
+own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have
+   [Go](https://go.dev/doc/install) installed. You can then use the command
+   below to add `jwt-go` as a dependency to your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v5
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v5"
+```
+
+## Usage
+
+A detailed usage guide, including how to sign and verify tokens can be found on
+our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
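+
+For orientation, here is a minimal, self-contained sketch of signing and then
+parsing an HMAC-signed token; the secret and claim values are placeholders, so
+refer to the usage guide above for complete examples:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/golang-jwt/jwt/v5"
+)
+
+func main() {
+	key := []byte("use-a-cryptographically-random-secret") // placeholder
+
+	// Create and sign a token carrying a couple of registered claims.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
+		Issuer:    "example",
+		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
+	})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse and verify it, restricting the accepted algorithms to HS256.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		return key, nil
+	}, jwt.WithValidMethods([]string{"HS256"}))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("token valid:", parsed.Valid)
+}
+```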
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+for examples of usage:
+
+* [Simple example of parsing and validating a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
+* [Simple example of building and signing a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
+* [Directory of
+ Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
+
+## Compliance
+
+This library was last reviewed to comply with [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
+notable differences:
+
+* In order to protect against accidental use of [Unsecured
+ JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
+ `alg=none` will only be accepted if the constant
+ `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
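+
+For illustration, a sketch of how such a token would be parsed deliberately
+(the `parseUnsecured` helper is made up for this example; never do this for
+tokens received from untrusted sources):
+
+```go
+package example
+
+import "github.com/golang-jwt/jwt/v5"
+
+// parseUnsecured accepts an unsecured (alg=none) token only because
+// jwt.UnsafeAllowNoneSignatureType is returned as the key; any other key
+// value makes such tokens fail verification. This disables signature
+// checking, so it must never be used with untrusted input.
+func parseUnsecured(unsecuredToken string) (*jwt.Token, error) {
+	return jwt.Parse(unsecuredToken, func(t *jwt.Token) (interface{}, error) {
+		return jwt.UnsafeAllowNoneSignatureType, nil
+	})
+}
+```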
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are
+appreciated. The API should be considered stable. There should be very few
+backward-incompatible changes outside of major version updates (and only with
+good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
+requests will land on `main`. Periodically, versions will be tagged from
+`main`. You can find all the releases on [the project releases
+page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
+your code.
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing
+methods or key functions. Simply implement the `SigningMethod` interface and
+register a factory method using `RegisterSigningMethod` or provide a
+`jwt.Keyfunc`.
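+
+A rough skeleton of what that looks like; `SigningMethodDummy` and the `DUMMY`
+algorithm name are invented for illustration, and a real implementation would
+delegate `Sign` and `Verify` to an HSM or KMS:
+
+```go
+package dummysigner
+
+import "github.com/golang-jwt/jwt/v5"
+
+// SigningMethodDummy is a hypothetical SigningMethod used only to show the
+// shape of the interface; it performs no real cryptography.
+type SigningMethodDummy struct{}
+
+func (m *SigningMethodDummy) Alg() string { return "DUMMY" }
+
+// Sign returns a placeholder value instead of a real signature.
+func (m *SigningMethodDummy) Sign(signingString string, key interface{}) ([]byte, error) {
+	return []byte("not-a-real-signature"), nil
+}
+
+// Verify only accepts the placeholder value produced by Sign above.
+func (m *SigningMethodDummy) Verify(signingString string, sig []byte, key interface{}) error {
+	if string(sig) != "not-a-real-signature" {
+		return jwt.ErrSignatureInvalid
+	}
+	return nil
+}
+
+func init() {
+	// Makes tokens with `"alg": "DUMMY"` resolvable by jwt.Parse.
+	jwt.RegisterSigningMethod("DUMMY", func() jwt.SigningMethod {
+		return &SigningMethodDummy{}
+	})
+}
+```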
+
+A common use case would be integrating with 3rd party signature providers,
+like key management services from various cloud providers or Hardware Security
+Modules (HSMs), or implementing additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by
+third parties and should not be considered an official offering by any of the
+mentioned cloud providers.
+
+## More
+
+Go package documentation can be found [on
+pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
+documentation can be found on [our project
+page](https://golang-jwt.github.io/jwt/).
+
+The command line utility included in this project (cmd/jwt) provides a
+straightforward example of token creation and parsing as well as a useful tool
+for debugging your own integration. You'll also find several implementation
+examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
+of the JWT logo, which is distributed under the terms of the [MIT
+License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
new file mode 100644
index 0000000..2740597
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it via a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try to be explicit and describe the steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible, given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
new file mode 100644
index 0000000..b5039e4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
@@ -0,0 +1,137 @@
+# `jwt-go` Version History
+
+The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases.
+
+## 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+## 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+## 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160.
+
+## 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+## 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+## 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+## 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+## 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+## 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+## 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+## 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+## 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+## 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+## 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+## 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+## 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+## 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
new file mode 100644
index 0000000..d50ff3d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go
@@ -0,0 +1,16 @@
+package jwt
+
+// Claims represent any form of a JWT Claims Set according to
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
+// common basis for validation, it is required that an implementation is able to
+// supply at least the claim names provided in
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
+// `iat`, `nbf`, `iss`, `sub` and `aud`.
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
new file mode 100644
index 0000000..a86dc1a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
new file mode 100644
index 0000000..c929e4a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return nil, ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return out, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
new file mode 100644
index 0000000..5700636
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded Elliptic Curve Public Key Structure
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
new file mode 100644
index 0000000..c213811
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -0,0 +1,79 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Sign the string and return the result. ed25519 performs a two-pass hash
+ // as part of its algorithm. Therefore, we need to pass a non-prehashed
+ // message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return sig, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
new file mode 100644
index 0000000..cdb5e68
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
new file mode 100644
index 0000000..23bb616
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+ ErrInvalidType = errors.New("invalid type for claim")
+)
+
+// joinedError is an error type that works similar to what [errors.Join]
+// produces, with the exception that it has a nice error string; mainly its
+// error messages are concatenated using a comma, rather than a newline.
+type joinedError struct {
+ errs []error
+}
+
+func (je joinedError) Error() string {
+ msg := []string{}
+ for _, err := range je.errs {
+ msg = append(msg, err.Error())
+ }
+
+ return strings.Join(msg, ", ")
+}
+
+// joinErrors joins together multiple errors. Useful for scenarios where
+// multiple errors next to each other occur, e.g., in claims validation.
+func joinErrors(errs ...error) error {
+ return &joinedError{
+ errs: errs,
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
new file mode 100644
index 0000000..a893d35
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
@@ -0,0 +1,47 @@
+//go:build go1.20
+// +build go1.20
+
+package jwt
+
+import (
+ "fmt"
+)
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error with a detailed error message. The message
+// will be prefixed with the contents of the supplied error type. Additionally,
+// more errors that provide more context can be supplied; they will be
+// appended to the message. This makes use of Go 1.20's ability to include
+// more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
new file mode 100644
index 0000000..2ad542f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
@@ -0,0 +1,78 @@
+//go:build !go1.20
+// +build !go1.20
+
+package jwt
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Is implements checking for multiple errors using [errors.Is], since multiple
+// error unwrapping is not possible in versions less than Go 1.20.
+func (je joinedError) Is(err error) bool {
+ for _, e := range je.errs {
+ if errors.Is(e, err) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// wrappedErrors is a workaround for wrapping multiple errors in environments
+// where Go 1.20 is not available. It basically uses the already implemented
+// functionality of joinedError to handle multiple errors and supplies a
+// custom error message that is identical to the one we produce in Go 1.20 using
+// multiple %w directives.
+type wrappedErrors struct {
+ msg string
+ joinedError
+}
+
+// Error returns the stored error string
+func (we wrappedErrors) Error() string {
+ return we.msg
+}
+
+// newError creates a new error with a detailed error message. The message
+// will be prefixed with the contents of the supplied error type. Additionally,
+// more errors that provide more context can be supplied; they will be
+// appended to the message. Since we cannot make use of Go 1.20's ability
+// to include more than one %w formatting directive in [fmt.Errorf], we have to
+// emulate that.
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ // We cannot wrap multiple errors here with %w, so we have to be a little
+ // bit creative. Basically, we are using %s instead of %w to produce the
+ // same error message and then throw the result into a custom error struct.
+ var format string
+ var args []any
+ if message != "" {
+ format = "%s: %s"
+ args = []any{err, message}
+ } else {
+ format = "%s"
+ args = []any{err}
+ }
+ errs := []error{err}
+
+ for _, e := range more {
+ format += ": %s"
+ args = append(args, e)
+ errs = append(errs, e)
+ }
+
+ err = &wrappedErrors{
+ msg: fmt.Sprintf(format, args...),
+ joinedError: joinedError{errs: errs},
+ }
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
new file mode 100644
index 0000000..aca600c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if
+// the signature is valid. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return newError("HMAC verify expects []byte", ErrInvalidKeyType)
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return hasher.Sum(nil), nil
+ }
+
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
new file mode 100644
index 0000000..b2b51a1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -0,0 +1,109 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]interface{} for JSON
+// decoding. This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// GetExpirationTime implements the Claims interface.
+func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+ return m.parseNumericDate("exp")
+}
+
+// GetNotBefore implements the Claims interface.
+func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+ return m.parseNumericDate("nbf")
+}
+
+// GetIssuedAt implements the Claims interface.
+func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+ return m.parseNumericDate("iat")
+}
+
+// GetAudience implements the Claims interface.
+func (m MapClaims) GetAudience() (ClaimStrings, error) {
+ return m.parseClaimsString("aud")
+}
+
+// GetIssuer implements the Claims interface.
+func (m MapClaims) GetIssuer() (string, error) {
+ return m.parseString("iss")
+}
+
+// GetSubject implements the Claims interface.
+func (m MapClaims) GetSubject() (string, error) {
+ return m.parseString("sub")
+}
+
+// parseNumericDate tries to parse a key in the map claims type as a numeric
+// date. This will succeed if the underlying type is either a [float64] or a
+// [json.Number]. If the key is absent, nil is returned without an error; any
+// other type results in an error.
+func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+ v, ok := m[key]
+ if !ok {
+ return nil, nil
+ }
+
+ switch exp := v.(type) {
+ case float64:
+ if exp == 0 {
+ return nil, nil
+ }
+
+ return newNumericDateFromSeconds(exp), nil
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return newNumericDateFromSeconds(v), nil
+ }
+
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+}
+
+// parseClaimsString tries to parse a key in the map claims type as a
+// [ClaimsStrings] type, which can either be a string or an array of string.
+func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+ var cs []string
+ switch v := m[key].(type) {
+ case string:
+ cs = append(cs, v)
+ case []string:
+ cs = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+ cs = append(cs, vs)
+ }
+ }
+
+ return cs, nil
+}
+
+// parseString tries to parse a key in the map claims type as a [string] type.
+// If the key does not exist, an empty string is returned. If the key has the
+// wrong type, an error is returned.
+func (m MapClaims) parseString(key string) (string, error) {
+ var (
+ ok bool
+ raw interface{}
+ iss string
+ )
+ raw, ok = m[key]
+ if !ok {
+ return "", nil
+ }
+
+ iss, ok = raw.(string)
+ if !ok {
+ return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+
+ return iss, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
new file mode 100644
index 0000000..685c2ea
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -0,0 +1,50 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if len(sig) != 0 {
+ return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return []byte{}, nil
+ }
+
+ return nil, NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
new file mode 100644
index 0000000..054c7eb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -0,0 +1,268 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const tokenDelimiter = "."
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ validMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ useJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ skipClaimsValidation bool
+
+ validator *Validator
+
+ decodeStrict bool
+
+ decodePaddingAllowed bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{
+ validator: &Validator{},
+ }
+
+ // Loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for validating.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+// than the default MapClaims implementation of Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.validMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.validMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+ }
+ }
+
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+ text := strings.Join(parts[0:2], ".")
+
+ // Lookup key(s)
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+ }
+
+ got, err := keyFunc(token)
+ if err != nil {
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+ }
+
+ switch have := got.(type) {
+ case VerificationKeySet:
+ if len(have.Keys) == 0 {
+ return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
+ }
+ // Iterate through keys and verify signature, skipping the rest when a match is found.
+ // Return the last error if no match is found.
+ for _, key := range have.Keys {
+ if err = token.Method.Verify(text, token.Signature, key); err == nil {
+ break
+ }
+ }
+ default:
+ err = token.Method.Verify(text, token.Signature, have)
+ }
+ if err != nil {
+ return token, newError("", ErrTokenSignatureInvalid, err)
+ }
+
+ // Validate Claims
+ if !p.skipClaimsValidation {
+ // Make sure we have at least a default validator
+ if p.validator == nil {
+ p.validator = NewValidator()
+ }
+
+ if err := p.validator.Validate(claims); err != nil {
+ return token, newError("", ErrTokenInvalidClaims, err)
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (since it has already
+// been or will be checked elsewhere in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+ return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+ }
+
+ // parse Claims
+ token.Claims = claims
+
+ claimBytes, err := p.DecodeSegment(parts[1])
+ if err != nil {
+ return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+ }
+
+ // If `useJSONNumber` is enabled then we must use *json.Decoder to decode
+ // the claims. However, this comes with a performance penalty so only use
+ // it if we must and, otherwise, simply use json.Unmarshal.
+ if !p.useJSONNumber {
+ // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = json.Unmarshal(claimBytes, &c)
+ } else {
+ err = json.Unmarshal(claimBytes, &claims)
+ }
+ } else {
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ dec.UseNumber()
+ // JSON Decode. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ }
+ if err != nil {
+ return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+ }
+ } else {
+ return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+ // causing unnecessary overhead parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding. This function will
+// take into account whether the [Parser] is configured with additional options,
+// such as [WithStrictDecoding] or [WithPaddingAllowed].
+func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if p.decodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if p.decodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature. The caller is strongly encouraged to set the
+// WithValidMethods option to validate the 'alg' claim in the token matches the
+// expected algorithm. For more details about the importance of validating the
+// 'alg' claim, see
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the
+// standard claims (such as RegisteredClaims), make sure that a) you either
+// embed a non-pointer version of the claims or b) if you are using a pointer,
+// allocate the proper memory for it before passing in the overall claims,
+// otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
new file mode 100644
index 0000000..88a780f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -0,0 +1,128 @@
+package jwt
+
+import "time"
+
+// ParserOption is used to implement functional-style options that modify the
+// behavior of the parser. To add new options, just create a function (ideally
+// beginning with With or Without) that returns an anonymous function that takes
+// a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser
+// will check. Only those methods will be considered valid. It is heavily
+// encouraged to use this option in order to prevent attacks such as
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.validMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with
+// UseNumber.
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.useJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This
+// option should only be used if you exactly know what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.skipClaimsValidation = true
+ }
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(leeway time.Duration) ParserOption {
+ return func(p *Parser) {
+ p.validator.leeway = leeway
+ }
+}
+
+// WithTimeFunc returns the ParserOption for specifying the time func. The
+// primary use-case for this is testing. If you are looking for a way to account
+// for clock-skew, WithLeeway should be used instead.
+func WithTimeFunc(f func() time.Time) ParserOption {
+ return func(p *Parser) {
+ p.validator.timeFunc = f
+ }
+}
+
+// WithIssuedAt returns the ParserOption to enable verification
+// of issued-at.
+func WithIssuedAt() ParserOption {
+ return func(p *Parser) {
+ p.validator.verifyIat = true
+ }
+}
+
+// WithExpirationRequired returns the ParserOption to make exp claim required.
+// By default exp claim is optional.
+func WithExpirationRequired() ParserOption {
+ return func(p *Parser) {
+ p.validator.requireExp = true
+ }
+}
+
+// WithAudience configures the validator to require the specified audience in
+// the `aud` claim. Validation will fail if the audience is not listed in the
+// token or the `aud` claim is missing.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if an audience is expected.
+func WithAudience(aud string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ }
+}
+
+// WithIssuer configures the validator to require the specified issuer in the
+// `iss` claim. Validation will fail if a different issuer is specified in the
+// token or the `iss` claim is missing.
+//
+// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if an issuer is expected.
+func WithIssuer(iss string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedIss = iss
+ }
+}
+
+// WithSubject configures the validator to require the specified subject in the
+// `sub` claim. Validation will fail if a different subject is specified in the
+// token or the `sub` claim is missing.
+//
+// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if a subject is expected.
+func WithSubject(sub string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedSub = sub
+ }
+}
+
+// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
+// padding. Note that the JWS RFC7515 states that the tokens will utilize a
+// Base64url encoding with no padding. Unfortunately, some implementations of
+// JWT are producing non-standard tokens, and thus require support for decoding.
+func WithPaddingAllowed() ParserOption {
+ return func(p *Parser) {
+ p.decodePaddingAllowed = true
+ }
+}
+
+// WithStrictDecoding will switch the codec used for decoding JWTs into strict
+// mode. In this mode, the decoder requires that trailing padding bits are zero,
+// as described in RFC 4648 section 3.5.
+func WithStrictDecoding() ParserOption {
+ return func(p *Parser) {
+ p.decodeStrict = true
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
new file mode 100644
index 0000000..77951a5
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
@@ -0,0 +1,63 @@
+package jwt
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use-case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// GetExpirationTime implements the Claims interface.
+func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
+ return c.ExpiresAt, nil
+}
+
+// GetNotBefore implements the Claims interface.
+func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
+ return c.NotBefore, nil
+}
+
+// GetIssuedAt implements the Claims interface.
+func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
+ return c.IssuedAt, nil
+}
+
+// GetAudience implements the Claims interface.
+func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
+ return c.Audience, nil
+}
+
+// GetIssuer implements the Claims interface.
+func (c RegisteredClaims) GetIssuer() (string, error) {
+ return c.Issuer, nil
+}
+
+// GetSubject implements the Claims interface.
+func (c RegisteredClaims) GetSubject() (string, error) {
+ return c.Subject, nil
+}
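
A short sketch of the embedding pattern described above, with an illustrative private `scope` claim and HMAC secret:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// MyClaims embeds RegisteredClaims so the registered claim set is parsed
// alongside the private "scope" claim.
type MyClaims struct {
	Scope string `json:"scope"`
	jwt.RegisteredClaims
}

func main() {
	secret := []byte("illustrative-secret")

	claims := MyClaims{
		Scope: "read:reports",
		RegisteredClaims: jwt.RegisteredClaims{
			Issuer:    "auth.example.com",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(15 * time.Minute)),
		},
	}
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)

	var out MyClaims
	_, err := jwt.ParseWithClaims(signed, &out, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(out.Scope, out.Issuer, err)
}
```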
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
new file mode 100644
index 0000000..83cbee6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
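
A minimal RS256 round trip, assuming a freshly generated key pair is acceptable for illustration (real deployments would load existing key material, e.g. via the PEM helpers in rsa_utils.go below):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Illustrative key; production code would load an existing key.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Signing uses the *rsa.PrivateKey.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"sub": "user-42",
	}).SignedString(priv)
	if err != nil {
		panic(err)
	}

	// Verification only needs the *rsa.PublicKey.
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	})
	fmt.Println(tok.Valid, err)
}
```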
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
new file mode 100644
index 0000000..28c386e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -0,0 +1,135 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+	// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+	// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey struct.
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey struct.
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
new file mode 100644
index 0000000..b3aeebb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -0,0 +1,107 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function should not be used anymore. It relies on the deprecated x509.DecryptPEMBlock
+// function, which was deprecated because RFC 1423 is regarded as insecure by design. Unfortunately, there is no
+// alternative in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
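
A sketch of how these PEM helpers are commonly wired into signing and verification; the file paths are illustrative assumptions:

```go
package main

import (
	"fmt"
	"os"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Illustrative paths; adjust to the actual key material.
	privPEM, err := os.ReadFile("private.pem")
	if err != nil {
		panic(err)
	}
	pubPEM, err := os.ReadFile("public.pem")
	if err != nil {
		panic(err)
	}

	priv, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
	if err != nil {
		panic(err)
	}
	pub, err := jwt.ParseRSAPublicKeyFromPEM(pubPEM)
	if err != nil {
		panic(err)
	}

	// Sign with the private key, verify with the public key.
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "user-42"}).SignedString(priv)
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return pub, nil })
	fmt.Println(tok.Valid, err)
}
```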
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
new file mode 100644
index 0000000..0d73631
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens. It
+// takes a decoded signature as an input in the Verify function and produces a
+// signature in Sign. The signature is then usually base64 encoded as part of a
+// JWT.
+type SigningMethod interface {
+ Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for the signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
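
The registry above is what the parser consults when it reads a token's `alg` header; it can also be inspected directly, and an allow-list of algorithms can be enforced with WithValidMethods (a sketch):

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// All algorithm names registered via init() in the method implementations.
	fmt.Println(jwt.GetAlgorithms())

	// Look up one method by its "alg" identifier; nil if unknown.
	if m := jwt.GetSigningMethod("RS256"); m != nil {
		fmt.Println("found:", m.Alg())
	}

	// A parser can be limited to an explicit allow-list of algorithms.
	p := jwt.NewParser(jwt.WithValidMethods([]string{"RS256", "PS256"}))
	_ = p
}
```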
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
new file mode 100644
index 0000000..53745d5
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
new file mode 100644
index 0000000..9c7f4ab
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed, but unverified
+// Token. This allows you to use properties in the Header of the token (such as
+// `kid`) to identify which key to use.
+//
+// The returned interface{} may be a single key or a VerificationKeySet containing
+// multiple keys.
+type Keyfunc func(*Token) (interface{}, error)
+
+// VerificationKey represents a public or secret key for verifying a token's signature.
+type VerificationKey interface {
+ crypto.PublicKey | []uint8
+}
+
+// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
+type VerificationKeySet struct {
+ Keys []VerificationKey
+}
+
+// Token represents a JWT Token. Different fields will be used depending on
+// whether you're creating or parsing/verifying a token.
+type Token struct {
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+}
+
+// New creates a new [Token] with the specified signing method and an empty map
+// of claims. Additional options can be specified, but are currently unused.
+func New(method SigningMethod, opts ...TokenOption) *Token {
+ return NewWithClaims(method, MapClaims{}, opts...)
+}
+
+// NewWithClaims creates a new [Token] with the specified signing method and
+// claims. Additional options can be specified, but are currently unused.
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT. The token is signed
+// using the SigningMethod specified in the token. Please refer to
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+// for an overview of the different signing methods and their respective key
+// types.
+func (t *Token) SignedString(key interface{}) (string, error) {
+ sstr, err := t.SigningString()
+ if err != nil {
+ return "", err
+ }
+
+ sig, err := t.Method.Sign(sstr, key)
+ if err != nil {
+ return "", err
+ }
+
+ return sstr + "." + t.EncodeSegment(sig), nil
+}
+
+// SigningString generates the signing string. This is the most expensive part
+// of the whole operation. Unless you need it for something special, use
+// SignedString instead.
+func (t *Token) SigningString() (string, error) {
+ h, err := json.Marshal(t.Header)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := json.Marshal(t.Claims)
+ if err != nil {
+ return "", err
+ }
+
+ return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding
+// stripped. In the future, this function might take into account a
+// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+// than a global function.
+func (*Token) EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
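
A minimal signing flow with MapClaims and an illustrative HMAC secret; SignedString runs the SigningString step and then appends the encoded signature:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("illustrative-secret")

	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub": "user-42",
		"exp": time.Now().Add(time.Hour).Unix(),
	})

	// header.claims portion only (unsigned).
	sstr, _ := tok.SigningString()

	// Complete header.claims.signature token.
	signed, err := tok.SignedString(secret)
	fmt.Println(sstr)
	fmt.Println(signed, err)
}
```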
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
new file mode 100644
index 0000000..b4ae3ba
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
@@ -0,0 +1,5 @@
+package jwt
+
+// TokenOption is a reserved type, which provides some forward compatibility,
+// if we ever want to introduce token creation-related options.
+type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
new file mode 100644
index 0000000..b2655a9
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -0,0 +1,149 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library. This
+// has an influence on the precision of times when comparing expiry or other
+// related time fields. Furthermore, it is also the precision of times when
+// serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+// especially its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the
+// behavior of the underlying []string. If it is set to false, it will serialize
+// to a single string, if it contains one element. Otherwise, it will serialize
+// to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON implements the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get round the issue:
+ //
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface and
+// deserializes a [NumericDate] from a JSON representation, i.e. a
+// [json.Number]. This number represents a UNIX epoch with either integer or
+// non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+		return fmt.Errorf("could not parse NumericDate: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be
+// deserialized from either a string array or a single string. This type is
+// necessary, since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value interface{}
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []interface{}:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return ErrInvalidType
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return ErrInvalidType
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g.
+ // used by the "aud" field, only contains one element, it MAY be serialized
+	// as a single string. This may or may not be desired, depending on the
+	// ecosystem of other JWT libraries in use, so we make it configurable via
+	// the variable MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
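
A small sketch showing how the package-level MarshalSingleStringAsArray and TimePrecision settings affect serialization (the claim values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	claims := jwt.RegisteredClaims{
		Audience:  jwt.ClaimStrings{"api.example.com"},
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}

	// Default: a single-element audience is serialized as a JSON array.
	b, _ := json.Marshal(claims)
	fmt.Println(string(b))

	// Opt in to the single-string form permitted by the RFC.
	jwt.MarshalSingleStringAsArray = false
	b, _ = json.Marshal(claims)
	fmt.Println(string(b))

	// Sub-second precision produces fractional NumericDate values.
	jwt.TimePrecision = time.Millisecond
	b, _ = json.Marshal(jwt.NewNumericDate(time.Now()))
	fmt.Println(string(b))
}
```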
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
new file mode 100644
index 0000000..008ecd8
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -0,0 +1,316 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// ClaimsValidator is an interface that can be implemented by custom claims that
+// wish to execute any additional claims validation based on
+// application-specific logic. The Validate function is then executed in
+// addition to the regular claims validation and any error returned is appended
+// to the final validation result.
+//
+// type MyCustomClaims struct {
+// Foo string `json:"foo"`
+// jwt.RegisteredClaims
+// }
+//
+// func (m MyCustomClaims) Validate() error {
+// if m.Foo != "bar" {
+// return errors.New("must be foobar")
+// }
+// return nil
+// }
+type ClaimsValidator interface {
+ Claims
+ Validate() error
+}
+
+// Validator is the core of the new Validation API. It is automatically used by
+// a [Parser] during parsing and can be modified with various parser options.
+//
+// The [NewValidator] function should be used to create an instance of this
+// struct.
+type Validator struct {
+ // leeway is an optional leeway that can be provided to account for clock skew.
+ leeway time.Duration
+
+ // timeFunc is used to supply the current time that is needed for
+ // validation. If unspecified, this defaults to time.Now.
+ timeFunc func() time.Time
+
+ // requireExp specifies whether the exp claim is required
+ requireExp bool
+
+ // verifyIat specifies whether the iat (Issued At) claim will be verified.
+ // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+	// claim only indicates the age of the token, and no validation check is
+	// required. However, if desired, it can be checked whether the iat is
+	// unrealistic, i.e., lies in the future.
+ verifyIat bool
+
+ // expectedAud contains the audience this token expects. Supplying an empty
+ // string will disable aud checking.
+ expectedAud string
+
+ // expectedIss contains the issuer this token expects. Supplying an empty
+ // string will disable iss checking.
+ expectedIss string
+
+ // expectedSub contains the subject this token expects. Supplying an empty
+ // string will disable sub checking.
+ expectedSub string
+}
+
+// NewValidator can be used to create a stand-alone validator with the supplied
+// options. This validator can then be used to validate already parsed claims.
+//
+// Note: Under normal circumstances, explicitly creating a validator is not
+// needed and can potentially be dangerous; instead, the functions of the
+// [Parser] should be used.
+//
+// The [Validator] only checks the *validity* of the claims, such as their
+// expiration time, but it does NOT perform *signature verification* of the
+// token.
+func NewValidator(opts ...ParserOption) *Validator {
+ p := NewParser(opts...)
+ return p.validator
+}
+
+// Validate validates the given claims. It will also perform any custom
+// validation if claims implements the [ClaimsValidator] interface.
+//
+// Note: It will NOT perform any *signature verification* on the token that
+// contains the claims and expects that the [Claims] were already successfully
+// verified.
+func (v *Validator) Validate(claims Claims) error {
+ var (
+ now time.Time
+ errs []error = make([]error, 0, 6)
+ err error
+ )
+
+ // Check, if we have a time func
+ if v.timeFunc != nil {
+ now = v.timeFunc()
+ } else {
+ now = time.Now()
+ }
+
+ // We always need to check the expiration time, but usage of the claim
+ // itself is OPTIONAL by default. requireExp overrides this behavior
+ // and makes the exp claim mandatory.
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
+ errs = append(errs, err)
+ }
+
+ // We always need to check not-before, but usage of the claim itself is
+ // OPTIONAL.
+ if err = v.verifyNotBefore(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Check issued-at if the option is enabled
+ if v.verifyIat {
+ if err = v.verifyIssuedAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected audience, we also require the audience claim
+ if v.expectedAud != "" {
+ if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected issuer, we also require the issuer claim
+ if v.expectedIss != "" {
+ if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected subject, we also require the subject claim
+ if v.expectedSub != "" {
+ if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, we want to give the claim itself some possibility to do some
+ // additional custom validation based on a custom Validate function.
+ cvt, ok := claims.(ClaimsValidator)
+ if ok {
+ if err := cvt.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ return joinErrors(errs...)
+}
+
+// verifyExpiresAt compares the exp claim in claims against cmp. This function
+// will succeed if cmp < exp. Additional leeway is taken into account.
+//
+// If exp is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+ exp, err := claims.GetExpirationTime()
+ if err != nil {
+ return err
+ }
+
+ if exp == nil {
+ return errorIfRequired(required, "exp")
+ }
+
+ return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+}
+
+// verifyIssuedAt compares the iat claim in claims against cmp. This function
+// will succeed if cmp >= iat. Additional leeway is taken into account.
+//
+// If iat is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+ iat, err := claims.GetIssuedAt()
+ if err != nil {
+ return err
+ }
+
+ if iat == nil {
+ return errorIfRequired(required, "iat")
+ }
+
+ return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+}
+
+// verifyNotBefore compares the nbf claim in claims against cmp. This function
+// will succeed if cmp >= nbf. Additional leeway is taken into account.
+//
+// If nbf is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+ nbf, err := claims.GetNotBefore()
+ if err != nil {
+ return err
+ }
+
+ if nbf == nil {
+ return errorIfRequired(required, "nbf")
+ }
+
+ return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+}
+
+// verifyAudience compares the aud claim against cmp.
+//
+// If aud is not set or an empty list, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error {
+ aud, err := claims.GetAudience()
+ if err != nil {
+ return err
+ }
+
+ if len(aud) == 0 {
+ return errorIfRequired(required, "aud")
+ }
+
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if stringClaims == "" {
+ return errorIfRequired(required, "aud")
+ }
+
+ return errorIfFalse(result, ErrTokenInvalidAudience)
+}
+
+// verifyIssuer compares the iss claim in claims against cmp.
+//
+// If iss is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+ iss, err := claims.GetIssuer()
+ if err != nil {
+ return err
+ }
+
+ if iss == "" {
+ return errorIfRequired(required, "iss")
+ }
+
+ return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+}
+
+// verifySubject compares the sub claim against cmp.
+//
+// If sub is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it is
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
+ sub, err := claims.GetSubject()
+ if err != nil {
+ return err
+ }
+
+ if sub == "" {
+ return errorIfRequired(required, "sub")
+ }
+
+ return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+}
+
+// errorIfFalse returns the error specified in err if the value is false.
+// Otherwise, nil is returned.
+func errorIfFalse(value bool, err error) error {
+ if value {
+ return nil
+ } else {
+ return err
+ }
+}
+
+// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+// true. Otherwise, nil is returned.
+func errorIfRequired(required bool, claim string) error {
+ if required {
+ return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+ } else {
+ return nil
+ }
+}
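
A sketch of the stand-alone validator path, assuming the claims were taken from a token whose signature was already verified, and using WithLeeway to tolerate a little clock skew:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Assumption: these claims came out of a token whose signature was
	// already verified elsewhere; Validate does not check signatures.
	claims := jwt.RegisteredClaims{
		Issuer:    "auth.example.com",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute)),
	}

	v := jwt.NewValidator(
		jwt.WithLeeway(30*time.Second),
		jwt.WithIssuer("auth.example.com"),
	)
	fmt.Println(v.Validate(claims)) // nil if everything checks out
}
```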
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..a502fdc
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..3e9a618
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,21 @@
+# uuid
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
+and DCE 1.1: Authentication and Security Services.
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid). It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice. One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+```sh
+go get github.com/google/uuid
+```
+
+###### Documentation
+[Go Reference](https://pkg.go.dev/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://pkg.go.dev/github.com/google/uuid
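
A rough usage sketch (the identifiers below are illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Random (Version 4) UUID.
	id := uuid.New()
	fmt.Println(id.String(), id.Version())

	// Parse accepts the canonical textual form (and a few variants).
	parsed, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	fmt.Println(parsed, err)
}
```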
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+ uuid, err := NewUUID()
+ if err == nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+ return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+ return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..dc60082
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+ NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+ Nil UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space[:]) //nolint:errcheck
+ h.Write(data) //nolint:errcheck
+ s := h.Sum(nil)
+ var uuid UUID
+ copy(uuid[:], s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
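
Name-based UUIDs are deterministic: the same namespace and name always yield the same value, as this sketch illustrates:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b, a.Version()) // true, version 5
}
```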
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..14bd340
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+ var js [36]byte
+ encodeHex(js[:], uuid)
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ return err
+ }
+ *uuid = id
+ return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+ return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(uuid[:], data)
+ return nil
+}
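
Because UUID implements encoding.TextMarshaler and encoding.TextUnmarshaler, it round-trips through encoding/json as its canonical string form; a short sketch with an illustrative struct:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID   uuid.UUID `json:"id"`
	Name string    `json:"name"`
}

func main() {
	in := record{ID: uuid.New(), Name: "example"}

	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // id serialized as its canonical string form

	var out record
	_ = json.Unmarshal(b, &out)
	fmt.Println(out.ID == in.ID) // true
}
```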
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ ifname string // name of interface being used
+ nodeID [6]byte // hardware for version 1 UUIDs
+ zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ iname, addr := getHardwareInterface(name) // null implementation for js
+ if iname != "" && addr != nil {
+ ifname = iname
+ copy(nodeID[:], addr)
+ return true
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+	// does not specify a specific interface, generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ ifname = "random"
+ randomBits(nodeID[:])
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nid := nodeID
+ return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ copy(nodeID[:], id)
+ ifname = "user"
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ var node [6]byte
+ copy(node[:], uuid[10:])
+ return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..b2a0bc8
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil {
+ return "", nil
+ }
+ }
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ return ifs.Name, ifs.HardwareAddr
+ }
+ }
+ return "", nil
+}
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 0000000..d7fcbf2
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+// var u uuid.NullUUID
+// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+// ...
+// if u.Valid {
+// // use u.UUID
+// } else {
+// // NULL value
+// }
+//
+type NullUUID struct {
+ UUID UUID
+ Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+ if value == nil {
+ nu.UUID, nu.Valid = Nil, false
+ return nil
+ }
+
+ err := nu.UUID.Scan(value)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+
+ nu.Valid = true
+ return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+ if !nu.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID[:], nil
+ }
+
+ return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(nu.UUID[:], data)
+ nu.Valid = true
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID.MarshalText()
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+ nu.UUID = id
+ nu.Valid = true
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+ if nu.Valid {
+ return json.Marshal(nu.UUID)
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+ if bytes.Equal(data, jsonNull) {
+ *nu = NullUUID{}
+ return nil // valid null UUID
+ }
+ err := json.Unmarshal(data, &nu.UUID)
+ nu.Valid = err == nil
+ return err
+}
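
Beyond the SQL example in the type's documentation, NullUUID also distinguishes JSON null from a concrete value; a sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID

	_ = json.Unmarshal([]byte(`null`), &nu)
	fmt.Println(nu.Valid) // false: JSON null

	_ = json.Unmarshal([]byte(`"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`), &nu)
	fmt.Println(nu.Valid, nu.UUID) // true and the parsed UUID
}
```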
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..2e02ec0
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case nil:
+ return nil
+
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src == "" {
+ return nil
+ }
+
+ // see Parse for required string format
+ u, err := Parse(src)
+ if err != nil {
+ return fmt.Errorf("Scan: %v", err)
+ }
+
+ *uuid = u
+
+ case []byte:
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(src) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(src) != 16 {
+ return uuid.Scan(string(src))
+ }
+ copy((*uuid)[:], src)
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements driver.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..c351129
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,134 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clockSeq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ oldSeq := clockSeq
+ clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if oldSeq != clockSeq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
+func (uuid UUID) Time() Time {
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or 255 if it is not a hex digit.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+ b1 := xvalues[x1]
+ b2 := xvalues[x2]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..5232b48
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,365 @@
+// Copyright 2018 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+const randPoolSize = 16 * 16
+
+var (
+ rander = rand.Reader // random function
+ poolEnabled = false
+ poolMu sync.Mutex
+ poolPos = randPoolSize // protected with poolMu
+ pool [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+ return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is a matcher function for the custom error type invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+ _, ok := err.(invalidLengthError)
+ return ok
+}
+
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
+func Parse(s string) (UUID, error) {
+ var uuid UUID
+ switch len(s) {
+ // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36:
+
+ // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ case 36 + 2:
+ s = s[1:]
+
+ // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ case 32:
+ var ok bool
+ for i := range uuid {
+ uuid[i], ok = xtob(s[i*2], s[i*2+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(s)}
+ }
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(s[x], s[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
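+
+// Illustrative sketch, not part of the upstream package: the four accepted
+// encodings of the same value all parse to the same UUID:
+//
+//	a, _ := uuid.Parse("123e4567-e89b-12d3-a456-426614174000")
+//	b, _ := uuid.Parse("urn:uuid:123e4567-e89b-12d3-a456-426614174000")
+//	c, _ := uuid.Parse("{123e4567-e89b-12d3-a456-426614174000}")
+//	d, _ := uuid.Parse("123e4567e89b12d3a456426614174000")
+//	fmt.Println(a == b && b == c && c == d) // true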
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+ var uuid UUID
+ switch len(b) {
+ case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+ }
+ b = b[9:]
+ case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ b = b[1:]
+ case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ var ok bool
+ for i := 0; i < 32; i += 2 {
+ uuid[i/2], ok = xtob(b[i], b[i+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(b)}
+ }
+ // b is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(b[x], b[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+ uuid, err := Parse(s)
+ if err != nil {
+ panic(`uuid: Parse(` + s + `): ` + err.Error())
+ }
+ return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+ err = uuid.UnmarshalBinary(b)
+ return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+ if err != nil {
+ panic(err)
+ }
+ return uuid
+}
+
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
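+
+// Illustrative sketch, not part of the upstream package: Validate checks a
+// candidate string without constructing a UUID value:
+//
+//	if err := uuid.Validate("123e4567-e89b-12d3-a456-426614174000"); err != nil {
+//		log.Fatalf("not a UUID: %v", err)
+//	}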
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
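+
+// Illustrative sketch, not part of the upstream package: the two textual
+// forms produced by String and URN for the same value:
+//
+//	id := uuid.MustParse("123e4567-e89b-12d3-a456-426614174000")
+//	fmt.Println(id.String()) // 123e4567-e89b-12d3-a456-426614174000
+//	fmt.Println(id.URN())    // urn:uuid:123e4567-e89b-12d3-a456-426614174000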
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst, uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+ return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+ poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+ poolEnabled = false
+ defer poolMu.Unlock()
+ poolMu.Lock()
+ poolPos = randPoolSize
+}
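+
+// Illustrative sketch, not part of the upstream package: the pool is
+// typically enabled once at program start, before any goroutine can call a
+// Version 4 generator concurrently:
+//
+//	func main() {
+//		uuid.EnableRandPool()
+//		ids := make([]uuid.UUID, 1000)
+//		for i := range ids {
+//			ids[i] = uuid.New()
+//		}
+//	}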
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..4631096
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ timeLow := uint32(now & 0xffffffff)
+ timeMid := uint16((now >> 32) & 0xffff)
+ timeHi := uint16((now >> 48) & 0x0fff)
+ timeHi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], timeLow)
+ binary.BigEndian.PutUint16(uuid[4:], timeMid)
+ binary.BigEndian.PutUint16(uuid[6:], timeHi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..7697802
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+// uuid.Must(uuid.NewRandom())
+func New() UUID {
+ return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+// uuid.New().String()
+func NewString() string {
+ return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, that
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+ if !poolEnabled {
+ return NewRandomFromReader(rander)
+ }
+ return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
+ var uuid UUID
+ _, err := io.ReadFull(r, uuid[:])
+ if err != nil {
+ return Nil, err
+ }
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
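+
+// Illustrative sketch, not part of the upstream package: a fixed reader makes
+// the output reproducible, which can be useful in tests. The 16-byte input
+// below is an arbitrary example value (bytes package assumed imported by the
+// caller):
+//
+//	src := bytes.NewReader([]byte("0123456789abcdef"))
+//	id, err := uuid.NewRandomFromReader(src)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(id.Version()) // VERSION_4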
+
+func newRandomFromPool() (UUID, error) {
+ var uuid UUID
+ poolMu.Lock()
+ if poolPos == randPoolSize {
+ _, err := io.ReadFull(rander, pool[:])
+ if err != nil {
+ poolMu.Unlock()
+ return Nil, err
+ }
+ poolPos = 0
+ }
+ copy(uuid[:], pool[poolPos:(poolPos+16)])
+ poolPos += 16
+ poolMu.Unlock()
+
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to a randomly generated NodeID automatically. If the
+// clock sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
+// as well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// Uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
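+
+// Illustrative sketch, not part of the upstream package: within one process,
+// later Version 7 UUIDs compare lexically greater, so they sort by creation
+// time:
+//
+//	first, _ := uuid.NewV7()
+//	second, _ := uuid.NewV7()
+//	fmt.Println(first.String() < second.String()) // true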
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (Variant is 10).
+// See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 0000000..cd3fcd1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 0000000..1931f40
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Google LLC (https://opensource.google.com/)
+Joachim Bauch
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 0000000..9171c97
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 0000000..d33ed7f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,33 @@
+# Gorilla WebSocket
+
+[GoDoc](https://godoc.org/github.com/gorilla/websocket)
+[CircleCI](https://circleci.com/gh/gorilla/websocket)
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+
+### Documentation
+
+* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 0000000..04fdafe
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,434 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+//
+// It is safe to call Dialer's methods concurrently.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // NetDialContext specifies the dial function for creating TCP connections. If
+ // NetDialContext is nil, NetDial is used.
+ NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
+ // NetDialTLSContext is nil, NetDialContext is used.
+ // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
+ // TLSClientConfig is ignored.
+ NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake
+ // is done there and TLSClientConfig is ignored.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+// Dial creates a new client connection by calling DialContext with a background context.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ return d.DialContext(context.Background(), urlStr, requestHeader)
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+}
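+
+// Illustrative sketch, not part of the upstream package: a minimal client
+// handshake using the default dialer. The URL is a placeholder:
+//
+//	conn, resp, err := websocket.DefaultDialer.Dial("wss://example.com/ws", nil)
+//	if err != nil {
+//		log.Fatalf("dial failed: %v (response: %v)", err, resp)
+//	}
+//	defer conn.Close()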
+
+// nilDialer is the dialer to use when the receiver is nil.
+var nilDialer = *DefaultDialer
+
+// DialContext creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// The context will be used in the request and in the Dialer.
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+ if d == nil {
+ d = &nilDialer
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: http.MethodGet,
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+ req = req.WithContext(ctx)
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ case k == "Sec-Websocket-Protocol":
+ req.Header["Sec-WebSocket-Protocol"] = vs
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
+ }
+
+ if d.HandshakeTimeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
+ defer cancel()
+ }
+
+ // Get network dial function.
+ var netDial func(network, add string) (net.Conn, error)
+
+ switch u.Scheme {
+ case "http":
+ if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ }
+ case "https":
+ if d.NetDialTLSContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialTLSContext(ctx, network, addr)
+ }
+ } else if d.NetDialContext != nil {
+ netDial = func(network, addr string) (net.Conn, error) {
+ return d.NetDialContext(ctx, network, addr)
+ }
+ } else if d.NetDial != nil {
+ netDial = d.NetDial
+ }
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if netDial == nil {
+ netDialer := &net.Dialer{}
+ netDial = func(network, addr string) (net.Conn, error) {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ }
+
+ // If needed, wrap the dial function to set the connection deadline.
+ if deadline, ok := ctx.Deadline(); ok {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ trace := httptrace.ContextClientTrace(ctx)
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(hostPort)
+ }
+
+ netConn, err := netDial("tcp", hostPort)
+ if err != nil {
+ return nil, nil, err
+ }
+ if trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{
+ Conn: netConn,
+ })
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if u.Scheme == "https" && d.NetDialTLSContext == nil {
+ // If NetDialTLSContext is set, assume that the TLS handshake has already been done
+
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := doHandshake(ctx, tlsConn, cfg)
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
+ }
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ if d.TLSClientConfig != nil {
+ for _, proto := range d.TLSClientConfig.NextProtos {
+ if proto != "http/1.1" {
+ return nil, nil, fmt.Errorf(
+ "websocket: protocol %q was given but is not supported;"+
+ "sharing tls.Config with net/http Transport can cause this error: %w",
+ proto, err,
+ )
+ }
+ }
+ }
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
+ !tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 0000000..813ffb1
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 0000000..5161ef8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1238 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close message.
+type CloseError struct {
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+ n := rand.Uint32()
+ return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+ if e, ok := err.(net.Error); ok && e.Temporary() {
+ err = &netError{msg: e.Error(), timeout: e.Timeout()}
+ }
+ return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan struct{} // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ // bytes remaining in current frame.
+ // use setReadRemaining to safely update this value and prevent overflow
+ readRemaining int64
+ readFinal bool // true when the final frame of the current message has been read.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
+
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBufferSize += maxFrameHeaderSize
+
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
+ }
+
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// setReadRemaining tracks the number of bytes remaining on the connection. If n
+// overflows, an ErrReadLimit is returned.
+func (c *Conn) setReadRemaining(n int64) error {
+ if n < 0 {
+ return ErrReadLimit
+ }
+
+ c.readRemaining = n
+ return nil
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+ err = hideTempErr(err)
+ c.writeErrMu.Lock()
+ if c.writeErr == nil {
+ c.writeErr = err
+ }
+ c.writeErrMu.Unlock()
+ return err
+}
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
+ <-c.mu
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ if len(buf1) == 0 {
+ _, err = c.conn.Write(buf0)
+ } else {
+ err = c.writeBufs(buf0, buf1)
+ }
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if frameType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return nil
+}
+
+func (c *Conn) writeBufs(bufs ...[]byte) error {
+ b := net.Buffers(bufs)
+ _, err := b.WriteTo(c.conn)
+ return err
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+ if !isControl(messageType) {
+ return errBadWriteOpCode
+ }
+ if len(data) > maxControlFramePayloadSize {
+ return errInvalidControlFrame
+ }
+
+ b0 := byte(messageType) | finalBit
+ b1 := byte(len(data))
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+ buf = append(buf, b0, b1)
+
+ if c.isServer {
+ buf = append(buf, data...)
+ } else {
+ key := newMaskKey()
+ buf = append(buf, key[:]...)
+ buf = append(buf, data...)
+ maskBytes(key, 0, buf[6:])
+ }
+
+ d := 1000 * time.Hour
+ if !deadline.IsZero() {
+ d = deadline.Sub(time.Now())
+ if d < 0 {
+ return errWriteTimeout
+ }
+ }
+
+ timer := time.NewTimer(d)
+ select {
+ case <-c.mu:
+ timer.Stop()
+ case <-timer.C:
+ return errWriteTimeout
+ }
+ defer func() { c.mu <- struct{}{} }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ _, err = c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if messageType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return err
+}
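+
+// Illustrative sketch, not part of the upstream package: sending a ping with
+// a write deadline, assuming an established *Conn named conn:
+//
+//	deadline := time.Now().Add(10 * time.Second)
+//	if err := conn.WriteControl(websocket.PingMessage, []byte("keepalive"), deadline); err != nil {
+//		log.Printf("ping failed: %v", err)
+//	}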
+
+// beginMessage prepares a connection and message writer for a new message.
+func (c *Conn) beginMessage(mw *messageWriter, messageType int) error {
+ // Close previous writer if not already closed by the application. It's
+ // probably better to return an error in this situation, but we cannot
+ // change this without breaking existing applications.
+ if c.writer != nil {
+ c.writer.Close()
+ c.writer = nil
+ }
+
+ if !isControl(messageType) && !isData(messageType) {
+ return errBadWriteOpCode
+ }
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ mw.c = c
+ mw.frameType = messageType
+ mw.pos = maxFrameHeaderSize
+
+ if c.writeBuf == nil {
+ wpd, ok := c.writePool.Get().(writePoolData)
+ if ok {
+ c.writeBuf = wpd.buf
+ } else {
+ c.writeBuf = make([]byte, c.writeBufSize)
+ }
+ }
+ return nil
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return nil, err
+ }
+ c.writer = &mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+func (w *messageWriter) endMessage(err error) error {
+ if w.err != nil {
+ return err
+ }
+ c := w.c
+ w.err = err
+ c.writer = nil
+ if c.writePool != nil {
+ c.writePool.Put(writePoolData{buf: c.writeBuf})
+ c.writeBuf = nil
+ }
+ return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+ c := w.c
+ length := w.pos - maxFrameHeaderSize + len(extra)
+
+ // Check for invalid control frames.
+ if isControl(w.frameType) &&
+ (!final || length > maxControlFramePayloadSize) {
+ return w.endMessage(errInvalidControlFrame)
+ }
+
+ b0 := byte(w.frameType)
+ if final {
+ b0 |= finalBit
+ }
+ if w.compress {
+ b0 |= rsv1Bit
+ }
+ w.compress = false
+
+ b1 := byte(0)
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ // Assume that the frame starts at beginning of c.writeBuf.
+ framePos := 0
+ if c.isServer {
+ // Adjust up if mask not included in the header.
+ framePos = 4
+ }
+
+ switch {
+ case length >= 65536:
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 127
+ binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+ case length > 125:
+ framePos += 6
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 126
+ binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+ default:
+ framePos += 8
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | byte(length)
+ }
+
+ if !c.isServer {
+ key := newMaskKey()
+ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+ maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+ if len(extra) > 0 {
+ return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode")))
+ }
+ }
+
+ // Write the buffers to the connection with best-effort detection of
+ // concurrent writes. See the concurrency section in the package
+ // documentation for more info.
+
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+
+ err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+
+ if err != nil {
+ return w.endMessage(err)
+ }
+
+ if final {
+ w.endMessage(errWriteClosed)
+ return nil
+ }
+
+ // Setup for next frame.
+ w.pos = maxFrameHeaderSize
+ w.frameType = continuationFrame
+ return nil
+}
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+ n := len(w.c.writeBuf) - w.pos
+ if n <= 0 {
+ if err := w.flushFrame(false, nil); err != nil {
+ return 0, err
+ }
+ n = len(w.c.writeBuf) - w.pos
+ }
+ if n > max {
+ n = max
+ }
+ return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+ // Don't buffer large messages.
+ err := w.flushFrame(false, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for {
+ if w.pos == len(w.c.writeBuf) {
+ err = w.flushFrame(false, nil)
+ if err != nil {
+ break
+ }
+ }
+ var n int
+ n, err = r.Read(w.c.writeBuf[w.pos:])
+ w.pos += n
+ nn += int64(n)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ break
+ }
+ }
+ return nn, err
+}
+
+func (w *messageWriter) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+ return w.flushFrame(true, nil)
+}
+
+// WritePreparedMessage writes a prepared message to the connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+ // Fast path with no allocations and single frame.
+
+ var mw messageWriter
+ if err := c.beginMessage(&mw, messageType); err != nil {
+ return err
+ }
+ n := copy(c.writeBuf[mw.pos:], data)
+ mw.pos += n
+ data = data[n:]
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(data); err != nil {
+ return err
+ }
+ return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+ // 1. Skip remainder of previous frame.
+
+ if c.readRemaining > 0 {
+ if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 2. Read and parse first two bytes of frame header.
+ // To aid debugging, collect and report all errors in the first two bytes
+ // of the header.
+
+ var errors []string
+
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ frameType := int(p[0] & 0xf)
+ final := p[0]&finalBit != 0
+ rsv1 := p[0]&rsv1Bit != 0
+ rsv2 := p[0]&rsv2Bit != 0
+ rsv3 := p[0]&rsv3Bit != 0
+ mask := p[1]&maskBit != 0
+ c.setReadRemaining(int64(p[1] & 0x7f))
+
+ c.readDecompress = false
+ if rsv1 {
+ if c.newDecompressionReader != nil {
+ c.readDecompress = true
+ } else {
+ errors = append(errors, "RSV1 set")
+ }
+ }
+
+ if rsv2 {
+ errors = append(errors, "RSV2 set")
+ }
+
+ if rsv3 {
+ errors = append(errors, "RSV3 set")
+ }
+
+ switch frameType {
+ case CloseMessage, PingMessage, PongMessage:
+ if c.readRemaining > maxControlFramePayloadSize {
+ errors = append(errors, "len > 125 for control")
+ }
+ if !final {
+ errors = append(errors, "FIN not set on control")
+ }
+ case TextMessage, BinaryMessage:
+ if !c.readFinal {
+ errors = append(errors, "data before FIN")
+ }
+ c.readFinal = final
+ case continuationFrame:
+ if c.readFinal {
+ errors = append(errors, "continuation after FIN")
+ }
+ c.readFinal = final
+ default:
+ errors = append(errors, "bad opcode "+strconv.Itoa(frameType))
+ }
+
+ if mask != c.isServer {
+ errors = append(errors, "bad MASK")
+ }
+
+ if len(errors) > 0 {
+ return noFrame, c.handleProtocolError(strings.Join(errors, ", "))
+ }
+
+ // 3. Read and parse frame length as per
+ // https://tools.ietf.org/html/rfc6455#section-5.2
+ //
+ // The length of the "Payload data", in bytes: if 0-125, that is the payload
+ // length.
+ // - If 126, the following 2 bytes interpreted as a 16-bit unsigned
+ // integer are the payload length.
+ // - If 127, the following 8 bytes interpreted as
+ // a 64-bit unsigned integer (the most significant bit MUST be 0) are the
+ // payload length. Multibyte length quantities are expressed in network byte
+ // order.
+
+ switch c.readRemaining {
+ case 126:
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil {
+ return noFrame, err
+ }
+ case 127:
+ p, err := c.read(8)
+ if err != nil {
+ return noFrame, err
+ }
+
+ if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 4. Handle frame masking.
+
+ if mask {
+ c.readMaskPos = 0
+ p, err := c.read(len(c.readMaskKey))
+ if err != nil {
+ return noFrame, err
+ }
+ copy(c.readMaskKey[:], p)
+ }
+
+ // 5. For text and binary messages, enforce read limit and return.
+
+ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+ c.readLength += c.readRemaining
+ // Don't allow readLength to overflow in the presence of a large readRemaining
+ // counter.
+ if c.readLength < 0 {
+ return noFrame, ErrReadLimit
+ }
+
+ if c.readLimit > 0 && c.readLength > c.readLimit {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ return noFrame, ErrReadLimit
+ }
+
+ return frameType, nil
+ }
+
+ // 6. Read control frame payload.
+
+ var payload []byte
+ if c.readRemaining > 0 {
+ payload, err = c.read(int(c.readRemaining))
+ c.setReadRemaining(0)
+ if err != nil {
+ return noFrame, err
+ }
+ if c.isServer {
+ maskBytes(c.readMaskKey, 0, payload)
+ }
+ }
+
+ // 7. Process control frame payload.
+
+ switch frameType {
+ case PongMessage:
+ if err := c.handlePong(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case PingMessage:
+ if err := c.handlePing(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case CloseMessage:
+ closeCode := CloseNoStatusReceived
+ closeText := ""
+ if len(payload) >= 2 {
+ closeCode = int(binary.BigEndian.Uint16(payload))
+ if !isValidReceivedCloseCode(closeCode) {
+ return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode))
+ }
+ closeText = string(payload[2:])
+ if !utf8.ValidString(closeText) {
+ return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+ }
+ }
+ if err := c.handleClose(closeCode, closeText); err != nil {
+ return noFrame, err
+ }
+ return noFrame, &CloseError{Code: closeCode, Text: closeText}
+ }
+
+ return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+ data := FormatCloseMessage(CloseProtocolError, message)
+ if len(data) > maxControlFramePayloadSize {
+ data = data[:maxControlFramePayloadSize]
+ }
+ c.WriteControl(CloseMessage, data, time.Now().Add(writeWait))
+ return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+	// Applications that do not handle the error returned from this method can
+	// spin in a tight loop on connection failure. To help application developers detect
+ // this error, panic on repeated reads to the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+ c := r.c
+ if c.messageReader != r {
+ return 0, io.EOF
+ }
+
+ for c.readErr == nil {
+
+ if c.readRemaining > 0 {
+ if int64(len(b)) > c.readRemaining {
+ b = b[:c.readRemaining]
+ }
+ n, err := c.br.Read(b)
+ c.readErr = hideTempErr(err)
+ if c.isServer {
+ c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+ }
+ rem := c.readRemaining
+ rem -= int64(n)
+ c.setReadRemaining(rem)
+ if c.readRemaining > 0 && c.readErr == io.EOF {
+ c.readErr = errUnexpectedEOF
+ }
+ return n, c.readErr
+ }
+
+ if c.readFinal {
+ c.messageReader = nil
+ return 0, io.EOF
+ }
+
+ frameType, err := c.advanceFrame()
+ switch {
+ case err != nil:
+ c.readErr = hideTempErr(err)
+ case frameType == TextMessage || frameType == BinaryMessage:
+ c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+ }
+ }
+
+ err := c.readErr
+ if err == io.EOF && c.messageReader == r {
+ err = errUnexpectedEOF
+ }
+ return 0, err
+}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close message to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// close messages as described in the section on Control Messages above.
+//
+// The connection read methods return a CloseError when a close message is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close message back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := FormatCloseMessage(code, "")
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
+
+// NetConn returns the underlying connection that is wrapped by c.
+// Note that writing to or reading from this connection directly will corrupt the
+// WebSocket connection.
+func (c *Conn) NetConn() net.Conn {
+ return c.conn
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
+// Deprecated: Use the NetConn method.
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+ // Return empty message because it's illegal to send
+ // CloseNoStatusReceived. Return non-nil value in case application
+ // checks for nil.
+ return []byte{}
+ }
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
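+
+// Example of a graceful close from the application's side (a sketch; the
+// five second deadline is an illustrative value, and c is an open *Conn).
+// The peer is expected to echo the close message, which then surfaces as a
+// *CloseError from the read methods:
+//
+//	deadline := time.Now().Add(5 * time.Second)
+//	msg := FormatCloseMessage(CloseNormalClosure, "")
+//	if err := c.WriteControl(CloseMessage, msg, deadline); err != nil {
+//		log.Println(err)
+//	}
+//	for {
+//		if _, _, err := c.ReadMessage(); err != nil {
+//			break
+//		}
+//	}
+//	c.Close()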
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..8db0cef
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
+// }
+// }
+//
+// In the above snippet, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
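+//
+// For example, an application can guard outgoing text messages with the
+// standard library's utf8 package (a sketch; conn and s are illustrative):
+//
+//  if !utf8.ValidString(s) {
+//      return errors.New("refusing to send invalid UTF-8 as a text message")
+//  }
+//  err := conn.WriteMessage(websocket.TextMessage, []byte(s))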
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
+//
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
+//
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
+//
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
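+//
+// An application that sends its own pings typically pairs a pong handler
+// with a read deadline. This is a sketch; conn, pongWait and pingPeriod are
+// illustrative names, not part of this package:
+//
+//  // In the reading goroutine, extend the deadline whenever a pong arrives.
+//  conn.SetReadDeadline(time.Now().Add(pongWait))
+//  conn.SetPongHandler(func(string) error {
+//      return conn.SetReadDeadline(time.Now().Add(pongWait))
+//  })
+//
+//  // In the writing goroutine, send a ping every pingPeriod.
+//  ticker := time.NewTicker(pingPeriod)
+//  defer ticker.Stop()
+//  for range ticker.C {
+//      deadline := time.Now().Add(pongWait)
+//      if err := conn.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
+//          return
+//      }
+//  }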
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
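+//
+// One common way to satisfy the single-writer requirement is to funnel all
+// outgoing messages through a single goroutine. A sketch (send and conn are
+// illustrative names):
+//
+//  send := make(chan []byte, 16)
+//  go func() {
+//      for msg := range send {
+//          if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
+//              return
+//          }
+//      }
+//  }()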
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
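+//
+// A sketch of an explicit origin allow-list (the allowed origin is an
+// illustrative value):
+//
+//  var upgrader = websocket.Upgrader{
+//      CheckOrigin: func(r *http.Request) bool {
+//          return r.Header.Get("Origin") == "https://app.example.com"
+//      },
+//  }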
+//
+// Buffers
+//
+// Connections buffer network input and output to reduce the number
+// of system calls when reading or writing messages.
+//
+// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
+// Section 5 for a discussion of message framing. A WebSocket frame header is
+// written to the network each time a write buffer is flushed to the network.
+// Decreasing the size of the write buffer can increase the amount of framing
+// overhead on the connection.
+//
+// The buffer sizes in bytes are specified by the ReadBufferSize and
+// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in about 1.01 times as
+// many system calls as a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
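+//
+// For example, a server with many mostly-idle connections might share one
+// write buffer pool across all upgrades. This is a sketch; a *sync.Pool can
+// be used because it provides the Get and Put methods that the
+// WriteBufferPool field expects:
+//
+//  var writeBufferPool = &sync.Pool{}
+//
+//  var upgrader = websocket.Upgrader{
+//      WriteBufferPool: writeBufferPool,
+//  }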
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 0000000..c64f8c8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "io"
+ "strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not
+// support concurrent calls to the Read method.
+func JoinMessages(c *Conn, term string) io.Reader {
+ return &joinReader{c: c, term: term}
+}
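+
+// Example usage (a sketch; conn and dst are illustrative):
+//
+//	r := websocket.JoinMessages(conn, "\n")
+//	if _, err := io.Copy(dst, r); err != nil {
+//		log.Println(err)
+//	}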
+
+type joinReader struct {
+ c *Conn
+ term string
+ r io.Reader
+}
+
+func (r *joinReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var err error
+ _, r.r, err = r.c.NextReader()
+ if err != nil {
+ return 0, err
+ }
+ if r.term != "" {
+ r.r = io.MultiReader(r.r, strings.NewReader(r.term))
+ }
+ }
+ n, err := r.r.Read(p)
+ if err == io.EOF {
+ err = nil
+ r.r = nil
+ }
+ return n, err
+}
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 0000000..dc2c1f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
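+
+// Example usage (a sketch; Message, conn and the field names are
+// illustrative, not part of this package):
+//
+//	type Message struct {
+//		Name string `json:"name"`
+//		Body string `json:"body"`
+//	}
+//
+//	if err := conn.WriteJSON(Message{Name: "greeting", Body: "hello"}); err != nil {
+//		log.Println(err)
+//	}
+//
+//	var m Message
+//	if err := conn.ReadJSON(&m); err != nil {
+//		log.Println(err)
+//	}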
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 0000000..d0742bf
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+//go:build !appengine
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..36250ca
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+//go:build appengine
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..c854225
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches the on-the-wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. The valid wire
+// representation is calculated lazily, only once for a given set of
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against caller modifying the data argument, remember the data
+ // copied to the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
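+
+// Example broadcast usage (a sketch; conns is an illustrative slice of
+// connections maintained by the application):
+//
+//	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, data)
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range conns {
+//		if err := c.WritePreparedMessage(pm); err != nil {
+//			log.Println(err)
+//		}
+//	}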
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan struct{}, 1)
+ mu <- struct{}{}
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000..e0f466b
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+	// Read response. It's OK to use and discard the buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..bb33597
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+//
+// It is safe to call Upgrader's methods concurrently.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is not nil, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specify if the server should attempt to negotiate per
+ // message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
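+
+// For example, a server that prefers "v2.chat" over "v1.chat" (the protocol
+// names are illustrative) can list them in order of preference and inspect
+// the result after the upgrade:
+//
+//	var upgrader = websocket.Upgrader{
+//		Subprotocols: []string{"v2.chat", "v1.chat"},
+//	}
+//
+//	// After Upgrade succeeds, conn.Subprotocol() reports the negotiated
+//	// value, or "" if the client requested none of the listed protocols.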
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie). To specify
+// subprotocols supported by the server, set Upgrader.Subprotocols directly.
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != http.MethodGet {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if !isValidChallengeKey(challengeKey) {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err := h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-WebSocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", http.StatusForbidden)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go
new file mode 100644
index 0000000..a62b68c
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/tls_handshake.go
@@ -0,0 +1,21 @@
+//go:build go1.17
+// +build go1.17
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+)
+
+func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.HandshakeContext(ctx); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
new file mode 100644
index 0000000..e1b2b44
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go
@@ -0,0 +1,21 @@
+//go:build !go1.17
+// +build !go1.17
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+)
+
+func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..31a5dee
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,298 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+ "unicode/utf8"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Token octets per RFC 2616.
+var isTokenOctet = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+// skipSpace returns a slice of the string s with all leading RFC 2616 linear
+// whitespace removed.
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if b := s[i]; b != ' ' && b != '\t' {
+ break
+ }
+ }
+ return s[i:]
+}
+
+// nextToken returns the leading RFC 2616 token of s and the string following
+// the token.
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if !isTokenOctet[s[i]] {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
+// and the string following the token or quoted string.
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// equalASCIIFold returns true if s is equal to t with ASCII case folding as
+// defined in RFC 4790.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value with ASCII case folding.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if equalASCIIFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
+
+// isValidChallengeKey checks if the argument meets RFC6455 specification.
+func isValidChallengeKey(s string) bool {
+ // From RFC6455:
+ //
+ // A |Sec-WebSocket-Key| header field with a base64-encoded (see
+ // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in
+ // length.
+
+ if s == "" {
+ return false
+ }
+ decoded, err := base64.StdEncoding.DecodeString(s)
+ return err == nil && len(decoded) == 16
+}
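
Reviewer note: `computeAcceptKey` above is the RFC 6455 `Sec-WebSocket-Accept` derivation (SHA-1 over the client key plus a fixed GUID, then base64). A standalone sketch that mirrors the unexported helper and checks it against the sample nonce from the RFC:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// acceptKey re-implements the vendored computeAcceptKey for illustration.
func acceptKey(challengeKey string) string {
	const keyGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	h := sha1.New()
	h.Write([]byte(challengeKey))
	h.Write([]byte(keyGUID))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	// RFC 6455 uses "dGhlIHNhbXBsZSBub25jZQ==" as its example client key;
	// the expected accept value is "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".
	fmt.Println(acceptKey("dGhlIHNhbXBsZSBub25jZQ=="))
}
```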
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 0000000..2e668f6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
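
Reviewer note: this file is a `bundle`-generated copy of `golang.org/x/net/proxy`, which gives the websocket Dialer SOCKS5 and `ALL_PROXY`/`NO_PROXY` support without an extra import. A hedged sketch of the same dialer used from the upstream package; the proxy address and target host are assumptions for illustration:

```go
package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Build a SOCKS5 dialer through a local proxy; nil auth means no
	// username/password exchange (RFC 1928/1929).
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// Dial the real target through the proxy, as the bundled
	// proxy_socks5.Dial above does internally.
	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via SOCKS5:", conn.RemoteAddr())
}
```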
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f920e9
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Alan Shreve (@inconshreveable)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
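
Reviewer note: the entire public API of mousetrap is the single function shown in the README above. A minimal usage sketch (the message text is illustrative); on non-Windows builds the check is a no-op that always returns false:

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// Detect the "double-clicked from Explorer" case on Windows and keep
	// the console window open instead of flashing help text and exiting.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command line tool; please run it from a terminal.")
		fmt.Print("Press Enter to exit...")
		_, _ = bufio.NewReader(os.Stdin).ReadString('\n')
		return
	}
	fmt.Println("normal CLI startup")
}
```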
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..06a91f0
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..0c56880
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,42 @@
+package mousetrap
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(syscall.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/jackc/pgpassfile/.travis.yml b/vendor/github.com/jackc/pgpassfile/.travis.yml
new file mode 100644
index 0000000..e176228
--- /dev/null
+++ b/vendor/github.com/jackc/pgpassfile/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
diff --git a/vendor/github.com/jackc/pgpassfile/LICENSE b/vendor/github.com/jackc/pgpassfile/LICENSE
new file mode 100644
index 0000000..c1c4f50
--- /dev/null
+++ b/vendor/github.com/jackc/pgpassfile/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2019 Jack Christensen
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jackc/pgpassfile/README.md b/vendor/github.com/jackc/pgpassfile/README.md
new file mode 100644
index 0000000..661289e
--- /dev/null
+++ b/vendor/github.com/jackc/pgpassfile/README.md
@@ -0,0 +1,8 @@
+[](https://godoc.org/github.com/jackc/pgpassfile)
+[](https://travis-ci.org/jackc/pgpassfile)
+
+# pgpassfile
+
+Package pgpassfile is a parser for PostgreSQL .pgpass files.
+
+Extracted and rewritten from original implementation in https://github.com/jackc/pgx.
diff --git a/vendor/github.com/jackc/pgpassfile/pgpass.go b/vendor/github.com/jackc/pgpassfile/pgpass.go
new file mode 100644
index 0000000..f7eed3c
--- /dev/null
+++ b/vendor/github.com/jackc/pgpassfile/pgpass.go
@@ -0,0 +1,110 @@
+// Package pgpassfile is a parser for PostgreSQL .pgpass files.
+package pgpassfile
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "regexp"
+ "strings"
+)
+
+// Entry represents a line in a PG passfile.
+type Entry struct {
+ Hostname string
+ Port string
+ Database string
+ Username string
+ Password string
+}
+
+// Passfile is the in memory data structure representing a PG passfile.
+type Passfile struct {
+ Entries []*Entry
+}
+
+// ReadPassfile reads the file at path and parses it into a Passfile.
+func ReadPassfile(path string) (*Passfile, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return ParsePassfile(f)
+}
+
+// ParsePassfile reads r and parses it into a Passfile.
+func ParsePassfile(r io.Reader) (*Passfile, error) {
+ passfile := &Passfile{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ entry := parseLine(scanner.Text())
+ if entry != nil {
+ passfile.Entries = append(passfile.Entries, entry)
+ }
+ }
+
+ return passfile, scanner.Err()
+}
+
+// Match (not colons or escaped colon or escaped backslash)+. Essentially gives a split on unescaped
+// colon.
+var colonSplitterRegexp = regexp.MustCompile("(([^:]|(\\:)))+")
+
+// var colonSplitterRegexp = regexp.MustCompile("((?:[^:]|(?:\\:)|(?:\\\\))+)")
+
+// parseLine parses a line into an *Entry. It returns nil on comment lines or any other unparsable
+// line.
+func parseLine(line string) *Entry {
+ const (
+ tmpBackslash = "\r"
+ tmpColon = "\n"
+ )
+
+ line = strings.TrimSpace(line)
+
+ if strings.HasPrefix(line, "#") {
+ return nil
+ }
+
+ line = strings.Replace(line, `\\`, tmpBackslash, -1)
+ line = strings.Replace(line, `\:`, tmpColon, -1)
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 5 {
+ return nil
+ }
+
+ // Unescape escaped colons and backslashes
+ for i := range parts {
+ parts[i] = strings.Replace(parts[i], tmpBackslash, `\`, -1)
+ parts[i] = strings.Replace(parts[i], tmpColon, `:`, -1)
+ }
+
+ return &Entry{
+ Hostname: parts[0],
+ Port: parts[1],
+ Database: parts[2],
+ Username: parts[3],
+ Password: parts[4],
+ }
+}
+
+// FindPassword finds the password for the provided hostname, port, database, and username. For a
+// Unix domain socket hostname must be set to "localhost". An empty string will be returned if no
+// match is found.
+//
+// See https://www.postgresql.org/docs/current/libpq-pgpass.html for more password file information.
+func (pf *Passfile) FindPassword(hostname, port, database, username string) (password string) {
+ for _, e := range pf.Entries {
+ if (e.Hostname == "*" || e.Hostname == hostname) &&
+ (e.Port == "*" || e.Port == port) &&
+ (e.Database == "*" || e.Database == database) &&
+ (e.Username == "*" || e.Username == username) {
+ return e.Password
+ }
+ }
+ return ""
+}
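
Reviewer note: a hedged sketch of using the parser directly; the file location, host, port, database, and user below are illustrative, and `FindPassword` treats `*` entries as wildcards exactly as documented above:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/jackc/pgpassfile"
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatal(err)
	}

	// Parse ~/.pgpass; each non-comment line is host:port:db:user:password.
	passfile, err := pgpassfile.ReadPassfile(filepath.Join(home, ".pgpass"))
	if err != nil {
		log.Fatal(err)
	}

	// First matching entry wins; "" means no entry matched.
	pw := passfile.FindPassword("localhost", "5432", "mydb", "alice")
	if pw == "" {
		fmt.Println("no matching .pgpass entry")
		return
	}
	fmt.Println("found a password for mydb")
}
```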
diff --git a/vendor/github.com/jackc/pgservicefile/LICENSE b/vendor/github.com/jackc/pgservicefile/LICENSE
new file mode 100644
index 0000000..f1b4c28
--- /dev/null
+++ b/vendor/github.com/jackc/pgservicefile/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2020 Jack Christensen
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jackc/pgservicefile/README.md b/vendor/github.com/jackc/pgservicefile/README.md
new file mode 100644
index 0000000..2fc7e01
--- /dev/null
+++ b/vendor/github.com/jackc/pgservicefile/README.md
@@ -0,0 +1,7 @@
+[](https://pkg.go.dev/github.com/jackc/pgservicefile)
+[](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)
+
+
+# pgservicefile
+
+Package pgservicefile is a parser for PostgreSQL service files (e.g. `.pg_service.conf`).
diff --git a/vendor/github.com/jackc/pgservicefile/pgservicefile.go b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
new file mode 100644
index 0000000..c62caa7
--- /dev/null
+++ b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
@@ -0,0 +1,81 @@
+// Package pgservicefile is a parser for PostgreSQL service files (e.g. .pg_service.conf).
+package pgservicefile
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+type Service struct {
+ Name string
+ Settings map[string]string
+}
+
+type Servicefile struct {
+ Services []*Service
+ servicesByName map[string]*Service
+}
+
+// GetService returns the named service.
+func (sf *Servicefile) GetService(name string) (*Service, error) {
+ service, present := sf.servicesByName[name]
+ if !present {
+ return nil, errors.New("not found")
+ }
+ return service, nil
+}
+
+// ReadServicefile reads the file at path and parses it into a Servicefile.
+func ReadServicefile(path string) (*Servicefile, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return ParseServicefile(f)
+}
+
+// ParseServicefile reads r and parses it into a Servicefile.
+func ParseServicefile(r io.Reader) (*Servicefile, error) {
+ servicefile := &Servicefile{}
+
+ var service *Service
+ scanner := bufio.NewScanner(r)
+ lineNum := 0
+ for scanner.Scan() {
+ lineNum += 1
+ line := scanner.Text()
+ line = strings.TrimSpace(line)
+
+ if line == "" || strings.HasPrefix(line, "#") {
+ // ignore comments and empty lines
+ } else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
+ servicefile.Services = append(servicefile.Services, service)
+ } else if service != nil {
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("unable to parse line %d", lineNum)
+ }
+
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+
+ service.Settings[key] = value
+ } else {
+ return nil, fmt.Errorf("line %d is not in a section", lineNum)
+ }
+ }
+
+ servicefile.servicesByName = make(map[string]*Service, len(servicefile.Services))
+ for _, service := range servicefile.Services {
+ servicefile.servicesByName[service.Name] = service
+ }
+
+ return servicefile, scanner.Err()
+}
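
Reviewer note: a hedged usage sketch for the service-file parser; the path, section name, and settings shown in the comment are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/jackc/pgservicefile"
)

func main() {
	// A typical ~/.pg_service.conf section looks like:
	//
	//   [mydb]
	//   host=db.internal
	//   port=5432
	//   dbname=app
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatal(err)
	}
	sf, err := pgservicefile.ReadServicefile(filepath.Join(home, ".pg_service.conf"))
	if err != nil {
		log.Fatal(err)
	}

	// Look up one [section] by name and list its key=value settings.
	svc, err := sf.GetService("mydb")
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range svc.Settings {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```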
diff --git a/vendor/github.com/jackc/pgx/v5/.gitignore b/vendor/github.com/jackc/pgx/v5/.gitignore
new file mode 100644
index 0000000..a2ebbe9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/.gitignore
@@ -0,0 +1,27 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.envrc
+/.testdb
+
+.DS_Store
diff --git a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
new file mode 100644
index 0000000..1e56878
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
@@ -0,0 +1,462 @@
+# 5.7.5 (May 17, 2025)
+
+* Support sslnegotiation connection option (divyam234)
+* Update golang.org/x/crypto to v0.37.0. This placates security scanners that were unable to see that pgx did not use the behavior affected by https://pkg.go.dev/vuln/GO-2025-3487.
+* TraceLog now logs Acquire and Release at the debug level (dave sinclair)
+* Add support for PGTZ environment variable
+* Add support for PGOPTIONS environment variable
+* Unpin memory used by Rows quicker
+* Remove PlanScan memoization. This resolves a rare issue where scanning could be broken for one type by first scanning another. The problem was in the memoization system and benchmarking revealed that memoization was not providing any meaningful benefit.
+
+# 5.7.4 (March 24, 2025)
+
+* Fix / revert change to scanning JSON `null` (Felix Röhrich)
+
+# 5.7.3 (March 21, 2025)
+
+* Expose EmptyAcquireWaitTime in pgxpool.Stat (vamshiaruru32)
+* Improve SQL sanitizer performance (ninedraft)
+* Fix Scan confusion with json(b), sql.Scanner, and automatic dereferencing (moukoublen, felix-roehrich)
+* Fix Values() for xml type always returning nil instead of []byte
+* Add ability to send Flush message in pipeline mode (zenkovev)
+* Fix pgtype.Timestamp's JSON behavior to match PostgreSQL (pconstantinou)
+* Better error messages when scanning structs (logicbomb)
+* Fix handling of error on batch write (bonnefoa)
+* Match libpq's connection fallback behavior more closely (felix-roehrich)
+* Add MinIdleConns to pgxpool (djahandarie)
+
+# 5.7.2 (December 21, 2024)
+
+* Fix prepared statement already exists on batch prepare failure
+* Add commit query to tx options (Lucas Hild)
+* Fix pgtype.Timestamp json unmarshal (Shean de Montigny-Desautels)
+* Add message body size limits in frontend and backend (zene)
+* Add xid8 type
+* Ensure planning encodes and scans cannot infinitely recurse
+* Implement pgtype.UUID.String() (Konstantin Grachev)
+* Switch from ExecParams to Exec in ValidateConnectTargetSessionAttrs functions (Alexander Rumyantsev)
+* Update golang.org/x/crypto
+* Fix json(b) columns prefer sql.Scanner interface like database/sql (Ludovico Russo)
+
+# 5.7.1 (September 10, 2024)
+
+* Fix data race in tracelog.TraceLog
+* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
+* Update golang.org/x/crypto and golang.org/x/text
+
+# 5.7.0 (September 7, 2024)
+
+* Add support for sslrootcert=system (Yann Soubeyrand)
+* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
+* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
+* Add MultiTrace (Stepan Rabotkin)
+* Add TraceLogConfig with customizable TimeKey (stringintech)
+* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
+* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
+* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
+* Update pgservicefile - fixes panic when parsing invalid file
+* Better error message when reading past end of batch
+* Don't print url when url.Parse returns an error (Kevin Biju)
+* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
+* Fix: Scan and encode types with underlying types of arrays
+
+# 5.6.0 (May 25, 2024)
+
+* Add StrictNamedArgs (Tomas Zahradnicek)
+* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
+* Add SeverityUnlocalized field to PgError / Notice
+* Performance optimization of RowToStructByPos/Name (Zach Olstein)
+* Allow customizing context canceled behavior for pgconn
+* Add ScanLocation to pgtype.Timestamp[tz]Codec
+* Add custom data to pgconn.PgConn
+* Fix ResultReader.Read() to handle nil values
+* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
+* pgconn.SafeToRetry checks for wrapped errors (tjasko)
+* Failed connection attempts include all errors
+* Optimize LargeObject.Read (Mitar)
+* Add tracing for connection acquire and release from pool (ngavinsir)
+* Fix encode driver.Valuer not called when nil
+* Add support for custom JSON marshal and unmarshal (Mitar)
+* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)
+
+# 5.5.5 (March 9, 2024)
+
+Use spaces instead of parentheses for SQL sanitization.
+
+This still solves the problem of negative numbers creating a line comment, but this avoids breaking edge cases such as
+`set foo to $1` where the substitution is taking place in a location where an arbitrary expression is not allowed.
+
+# 5.5.4 (March 4, 2024)
+
+Fix CVE-2024-27304
+
+SQL injection can occur if an attacker can cause a single query or bind message to exceed 4 GB in size. An integer
+overflow in the calculated message size can cause the one large message to be sent as multiple messages under the
+attacker's control.
+
+Thanks to Paul Gerste for reporting this issue.
+
+* Fix behavior of CollectRows to return empty slice if Rows are empty (Felix)
+* Fix simple protocol encoding of json.RawMessage
+* Fix *Pipeline.getResults should close pipeline on error
+* Fix panic in TryFindUnderlyingTypeScanPlan (David Kurman)
+* Fix deallocation of invalidated cached statements in a transaction
+* Handle invalid sslkey file
+* Fix scan float4 into sql.Scanner
+* Fix pgtype.Bits not making copy of data from read buffer. This would cause the data to be corrupted by future reads.
+
+# 5.5.3 (February 3, 2024)
+
+* Fix: prepared statement already exists
+* Improve CopyFrom auto-conversion of text-ish values
+* Add ltree type support (Florent Viel)
+* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
+* Add AppendRows function (Edoardo Spadolini)
+* Optimize convert UUID [16]byte to string (Kirill Malikov)
+* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)
+
+# 5.5.2 (January 13, 2024)
+
+* Allow NamedArgs to start with underscore
+* pgproto3: Maximum message body length support (jeremy.spriet)
+* Upgrade golang.org/x/crypto to v0.17.0
+* Add snake_case support to RowToStructByName (Tikhon Fedulov)
+* Fix: update description cache after exec prepare (James Hartig)
+* Fix: pipeline checks if it is closed (James Hartig and Ryan Fowler)
+* Fix: normalize timeout / context errors during TLS startup (Samuel Stauffer)
+* Add OnPgError for easier centralized error handling (James Hartig)
+
+# 5.5.1 (December 9, 2023)
+
+* Add CopyFromFunc helper function. (robford)
+* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
+* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
+* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
+* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
+* Add pgtype.Numeric.ScanScientific (Eshton Robateau)
+
+# 5.5.0 (November 4, 2023)
+
+* Add CollectExactlyOneRow. (Julien GOTTELAND)
+* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
+* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
+* Statement cache now uses deterministic, stable statement names.
+* database/sql prepared statement names are deterministically generated.
+* Fix: SendBatch wasn't respecting context cancellation.
+* Fix: Timeout error from pipeline is now normalized.
+* Fix: database/sql encoding json.RawMessage to []byte.
+* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
+* stdlib: Use Ping instead of CheckConn in ResetSession
+* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
+
+# 5.4.3 (August 5, 2023)
+
+* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
+* Fix: connect_timeout for sslmode=allow|prefer (smaher-edb)
+* Fix: pgxpool: background health check cannot overflow pool
+* Fix: Check for nil in defer when sending batch (recover properly from panic)
+* Fix: json scan of non-string pointer to pointer
+* Fix: zeronull.Timestamptz should use pgtype.Timestamptz
+* Fix: NewConnsCount was not correctly counting connections created by Acquire directly. (James Hartig)
+* RowTo(AddrOf)StructByPos ignores fields with "-" db tag
+* Optimization: improve text format numeric parsing (horpto)
+
+# 5.4.2 (July 11, 2023)
+
+* Fix: RowScanner errors are fatal to Rows
+* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
+* Hstore text codec internal improvements (Evan Jones)
+* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
+* Fix: Stop background reader as soon as possible.
+* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.
+
+# 5.4.1 (June 18, 2023)
+
+* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
+* Add TxOptions.BeginQuery to allow overriding the default BEGIN query
+
+# 5.4.0 (June 14, 2023)
+
+* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
+* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
+* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
+* CancelRequest: don't try to read the reply (Nicola Murino)
+* Fix: correctly handle bool type aliases (Wichert Akkerman)
+* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
+* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
+* Add BeforeClose to pgxpool.Pool (Evan Cordell)
+* Fix: various hstore fixes and optimizations (Evan Jones)
+* Fix: RowToStructByPos with embedded unexported struct
+* Support different bool string representations (Lev Zakharov)
+* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
+* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
+* Fix: pipeline batch results not closing pipeline when error occurs while reading directly from results instead of using
+ a callback.
+* Fix: scanning a table type into a struct
+* Fix: scan array of record to pointer to slice of struct
+* Fix: handle null for json (Cemre Mengu)
+* Batch Query callback is called even when there is an error
+* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)
+
+# 5.3.1 (February 27, 2023)
+
+* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)
+* Fix: sql.Scanner not being used in certain cases
+* Add text format jsonpath support
+* Fix: fake non-blocking read adaptive wait time
+
+# 5.3.0 (February 11, 2023)
+
+* Fix: json values work with sql.Scanner
+* Fixed / improved error messages (Mark Chambers and Yevgeny Pats)
+* Fix: support scan into single dimensional arrays
+* Fix: MaxConnLifetimeJitter setting actually jitter (Ben Weintraub)
+* Fix: driver.Value representation of bytea should be []byte not string
+* Fix: better handling of unregistered OIDs
+* CopyFrom can use query cache to avoid extra round trip to get OIDs (Alejandro Do Nascimento Mora)
+* Fix: encode to json ignoring driver.Valuer
+* Support sql.Scanner on renamed base type
+* Fix: pgtype.Numeric text encoding of negative numbers (Mark Chambers)
+* Fix: connect with multiple hostnames when one can't be resolved
+* Upgrade puddle to remove dependency on uber/atomic and fix alignment issue on 32-bit platform
+* Fix: scanning json column into **string
+* Multiple reductions in memory allocations
+* Fake non-blocking read adapts its max wait time
+* Improve CopyFrom performance and reduce memory usage
+* Fix: encode []any to array
+* Fix: LoadType for composite with dropped attributes (Felix Röhrich)
+* Support v4 and v5 stdlib in same program
+* Fix: text format array decoding with string of "NULL"
+* Prefer binary format for arrays
+
+# 5.2.0 (December 5, 2022)
+
+* `tracelog.TraceLog` implements the pgx.PrepareTracer interface. (Vitalii Solodilov)
+* Optimize creating begin transaction SQL string (Petr Evdokimov and ksco)
+* `Conn.LoadType` supports range and multirange types (Vitalii Solodilov)
+* Fix scan `uint` and `uint64` `ScanNumeric`. This resolves a PostgreSQL `numeric` being incorrectly scanned into `uint` and `uint64`.
+
+# 5.1.1 (November 17, 2022)
+
+* Fix simple query sanitizer where query text contains a Unicode replacement character.
+* Remove erroneous `name` argument from `DeallocateAll()`. Technically, this is a breaking change, but given that method was only added 5 days ago this change was accepted. (Bodo Kaiser)
+
+# 5.1.0 (November 12, 2022)
+
+* Update puddle to v2.1.2. This resolves a race condition and a deadlock in pgxpool.
+* `QueryRewriter.RewriteQuery` now returns an error. Technically, this is a breaking change for any external implementers, but given the minimal likelihood that there are actually any external implementers this change was accepted.
+* Expose `GetSSLPassword` support to pgx.
+* Fix encode `ErrorResponse` unknown field handling. This would only affect pgproto3 being used directly as a proxy with a non-PostgreSQL server that included additional error fields.
+* Fix date text format encoding with 5 digit years.
+* Fix date values passed to a `sql.Scanner` as `string` instead of `time.Time`.
+* DateCodec.DecodeValue can return `pgtype.InfinityModifier` instead of `string` for infinite values. This now matches the behavior of the timestamp types.
+* Add domain type support to `Conn.LoadType()`.
+* Add `RowToStructByName` and `RowToAddrOfStructByName`. (Pavlo Golub)
+* Add `Conn.DeallocateAll()` to clear all prepared statements including the statement cache. (Bodo Kaiser)
+
+# 5.0.4 (October 24, 2022)
+
+* Fix: CollectOneRow prefers PostgreSQL error over pgx.ErrorNoRows
+* Fix: some reflect Kind checks to first check for nil
+* Bump golang.org/x/text dependency to placate snyk
+* Fix: RowToStructByPos on structs with multiple anonymous sub-structs (Baptiste Fontaine)
+* Fix: Exec checks if tx is closed
+
+# 5.0.3 (October 14, 2022)
+
+* Fix `driver.Valuer` handling edge cases that could cause infinite loop or crash
+
+# v5.0.2 (October 8, 2022)
+
+* Fix date encoding in text format to always use 2 digits for month and day
+* Prefer driver.Valuer over wrap plans when encoding
+* Fix scan to pointer to pointer to renamed type
+* Allow scanning NULL even if PG and Go types are incompatible
+
+# v5.0.1 (September 24, 2022)
+
+* Fix 32-bit atomic usage
+* Add MarshalJSON for Float8 (yogipristiawan)
+* Add `[` and `]` to text encoding of `Lseg`
+* Fix sqlScannerWrapper NULL handling
+
+# v5.0.0 (September 17, 2022)
+
+## Merged Packages
+
+`github.com/jackc/pgtype`, `github.com/jackc/pgconn`, and `github.com/jackc/pgproto3` are now included in the main
+`github.com/jackc/pgx` repository. Previously there was confusion as to where issues should be reported, additional
+release work due to releasing multiple packages, and less clear changelogs.
+
+## pgconn
+
+`CommandTag` is now an opaque type instead of directly exposing an underlying `[]byte`.
+
+The return value `ResultReader.Values()` is no longer safe to retain a reference to after a subsequent call to `NextRow()` or `Close()`.
+
+`Trace()` method adds low level message tracing similar to the `PQtrace` function in `libpq`.
+
+pgconn now uses non-blocking IO. This is a significant internal restructuring, but it should not cause any visible changes on its own. However, it is important in implementing other new features.
+
+`CheckConn()` checks a connection's liveness by doing a non-blocking read. This can be used to detect database restarts or network interruptions without executing a query or a ping.
+
+pgconn now supports pipeline mode.
+
+`*PgConn.ReceiveResults` removed. Use pipeline mode instead.
+
+`Timeout()` no longer considers `context.Canceled` as a timeout error. `context.DeadlineExceeded` still is considered a timeout error.
+
+## pgxpool
+
+`Connect` and `ConnectConfig` have been renamed to `New` and `NewWithConfig` respectively. The `LazyConnect` option has been removed. Pools always lazily connect.
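+
+For illustration, a minimal sketch of constructing a v5 pool (assuming a `DATABASE_URL` environment variable and the
+`context`, `log`, `os`, and `github.com/jackc/pgx/v5/pgxpool` imports):
+
+```go
+pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
+if err != nil {
+	log.Fatalf("unable to create connection pool: %v", err)
+}
+defer pool.Close()
+```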
+
+## pgtype
+
+The `pgtype` package has been significantly changed.
+
+### NULL Representation
+
+Previously, types had a `Status` field that could be `Undefined`, `Null`, or `Present`. This has been changed to a
+`Valid` `bool` field to harmonize with how `database/sql` represents `NULL` and to make the zero value usable.
+
+Previously, a type that implemented `driver.Valuer` would have the `Value` method called even on a nil pointer. All nils
+whether typed or untyped now represent `NULL`.
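+
+A minimal sketch of the new representation (assuming an open `*pgx.Conn` in `conn`, a `context.Context` in `ctx`, and
+the `fmt` and `github.com/jackc/pgx/v5/pgtype` imports):
+
+```go
+// The zero value now represents NULL.
+var price pgtype.Int8
+fmt.Println(price.Valid) // false
+
+// A present value sets Valid to true.
+price = pgtype.Int8{Int64: 499, Valid: true}
+
+// Scanning SQL NULL sets Valid back to false; there is no separate Status constant.
+if err := conn.QueryRow(ctx, "select null::int8").Scan(&price); err != nil {
+	return err
+}
+fmt.Println(price.Valid) // false
+```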
+
+### Codec and Value Split
+
+Previously, the type system combined decoding and encoding values with the value types. e.g. Type `Int8` both handled
+encoding and decoding the PostgreSQL representation and acted as a value object. This caused some difficulties when
+there was not an exact 1 to 1 relationship between the Go types and the PostgreSQL types. For example, scanning a
+PostgreSQL binary `numeric` into a Go `float64` was awkward (see https://github.com/jackc/pgtype/issues/147). These
+concepts have been separated. A `Codec` only has responsibility for encoding and decoding values. Value types are
+generally defined by implementing an interface that a particular `Codec` understands (e.g. `PointScanner` and
+`PointValuer` for the PostgreSQL `point` type).
+
+### Array Types
+
+All array types are now handled by `ArrayCodec` instead of using code generation for each new array type. This also
+means that less common array types such as `point[]` are now supported. `Array[T]` supports PostgreSQL multi-dimensional
+arrays.
+
+### Composite Types
+
+Composite types must be registered before use. `CompositeFields` may still be used to construct and destruct composite
+values, but any type may now implement `CompositeIndexGetter` and `CompositeIndexScanner` to be used as a composite.
+
+### Range Types
+
+Range types are now handled with types `RangeCodec` and `Range[T]`. This allows additional user defined range types to
+easily be handled. Multirange types are handled similarly with `MultirangeCodec` and `Multirange[T]`.
+
+### pgxtype
+
+`LoadDataType` moved to `*Conn` as `LoadType`.
+
+### Bytea
+
+The `Bytea` and `GenericBinary` types have been replaced. Use the following instead:
+
+* `[]byte` - For normal usage directly use `[]byte`.
+* `DriverBytes` - Uses driver memory only available until next database method call. Avoids a copy and an allocation.
+* `PreallocBytes` - Uses preallocated byte slice to avoid an allocation.
+* `UndecodedBytes` - Avoids any decoding. Allows working with raw bytes.
+
+### Dropped lib/pq Support
+
+`pgtype` previously supported and was tested against [lib/pq](https://github.com/lib/pq). While it will continue to work
+in most cases, this is no longer supported.
+
+### database/sql Scan
+
+Previously, most `Scan` implementations would convert `[]byte` to `string` automatically to decode a text value. Now
+only `string` is handled. This is to allow the possibility of future binary support in `database/sql` mode by
+considering `[]byte` to be binary format and `string` text format. This change should have no effect for any use with
+`pgx`. The previous behavior was only necessary for `lib/pq` compatibility.
+
+Added `*Map.SQLScanner` to create a `sql.Scanner` for types such as `[]int32` and `Range[T]` that do not implement
+`sql.Scanner` directly.
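+
+A short sketch of `SQLScanner` (assuming a `*sql.DB` in `db` opened through the stdlib driver and the
+`github.com/jackc/pgx/v5/pgtype` import):
+
+```go
+m := pgtype.NewMap()
+
+var ids []int32
+err := db.QueryRow("select '{1,2,3}'::int4[]").Scan(m.SQLScanner(&ids))
+if err != nil {
+	return err
+}
+```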
+
+### Number Type Fields Include Bit size
+
+`Int2`, `Int4`, `Int8`, `Float4`, `Float8`, and `Uint32` fields now include bit size. e.g. `Int` is renamed to `Int64`.
+This matches the convention set by `database/sql`. In addition, for comparable types like `pgtype.Int8` and
+`sql.NullInt64` the structures are identical. This means they can be directly converted one to another.
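+
+For example, because the field layouts match, a plain Go conversion works in both directions (a sketch assuming the
+`database/sql`, `fmt`, and `github.com/jackc/pgx/v5/pgtype` imports):
+
+```go
+pgVal := pgtype.Int8{Int64: 7, Valid: true}
+
+// Identical field names and types allow a direct struct conversion.
+sqlVal := sql.NullInt64(pgVal)
+pgAgain := pgtype.Int8(sqlVal)
+
+fmt.Println(sqlVal.Int64, pgAgain.Valid) // 7 true
+```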
+
+### 3rd Party Type Integrations
+
+* Extracted integrations with https://github.com/shopspring/decimal and https://github.com/gofrs/uuid to
+ https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid respectively. This trims
+ the pgx dependency tree.
+
+### Other Changes
+
+* `Bit` and `Varbit` are both replaced by the `Bits` type.
+* `CID`, `OID`, `OIDValue`, and `XID` are replaced by the `Uint32` type.
+* `Hstore` is now defined as `map[string]*string`.
+* `JSON` and `JSONB` types removed. Use `[]byte` or `string` directly.
+* `QChar` type removed. Use `rune` or `byte` directly.
+* `Inet` and `Cidr` types removed. Use `netip.Addr` and `netip.Prefix` directly. These types are more memory efficient than the previous `net.IPNet`.
+* `Macaddr` type removed. Use `net.HardwareAddr` directly.
+* Renamed `pgtype.ConnInfo` to `pgtype.Map`.
+* Renamed `pgtype.DataType` to `pgtype.Type`.
+* Renamed `pgtype.None` to `pgtype.Finite`.
+* `RegisterType` now accepts a `*Type` instead of `Type`.
+* Assorted array helper methods and types made private.
+
+## stdlib
+
+* Removed `AcquireConn` and `ReleaseConn` as that functionality has been built in since Go 1.13.
+
+## Reduced Memory Usage by Reusing Read Buffers
+
+Previously, the connection read buffer would allocate large chunks of memory and never reuse them. This allowed
+transferring ownership to anything such as scanned values without incurring an additional allocation and memory copy.
+However, this came at the cost of overall increased memory allocation size. Worse, it was also possible to pin large
+chunks of memory by retaining a reference to a small value that originally came directly from the read buffer. Now
+ownership remains with the read buffer and anything needing to retain a value must make a copy.
+
+## Query Execution Modes
+
+Control over automatic prepared statement caching and simple protocol use are now combined into query execution mode.
+See documentation for `QueryExecMode`.
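+
+The mode can also be overridden per query by passing a `QueryExecMode` as the first query argument. A sketch (assuming
+an open `*pgx.Conn` in `conn` and a `context.Context` in `ctx`):
+
+```go
+// Override the connection's DefaultQueryExecMode for a single query.
+rows, err := conn.Query(ctx,
+	"select name from widgets where weight < $1",
+	pgx.QueryExecModeSimpleProtocol,
+	100,
+)
+if err != nil {
+	return err
+}
+defer rows.Close()
+```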
+
+## QueryRewriter Interface and NamedArgs
+
+pgx now supports named arguments with the `NamedArgs` type. This is implemented via the new `QueryRewriter` interface which
+allows arbitrary rewriting of query SQL and arguments.
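+
+A short sketch of `NamedArgs` (assuming an open `*pgx.Conn` in `conn`, a `context.Context` in `ctx`, and an
+illustrative `widgets` table):
+
+```go
+rows, err := conn.Query(ctx,
+	"select id, name from widgets where color = @color and weight < @max_weight",
+	pgx.NamedArgs{"color": "blue", "max_weight": 100},
+)
+if err != nil {
+	return err
+}
+defer rows.Close()
+```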
+
+## RowScanner Interface
+
+The `RowScanner` interface allows a single argument to Rows.Scan to scan the entire row.
+
+## Rows Result Helpers
+
+* `CollectRows` and `RowTo*` functions simplify collecting results into a slice (see the sketch after this list).
+* `CollectOneRow` collects one row using `RowTo*` functions.
+* `ForEachRow` simplifies scanning each row and executing code using the scanned values. `ForEachRow` replaces `QueryFunc`.
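+
+A sketch of `CollectRows` with `RowToStructByName` (assuming an open `*pgx.Conn` in `conn`, a `context.Context` in
+`ctx`, and the `fmt` import; the `widgets` table and `Widget` struct are illustrative):
+
+```go
+type Widget struct {
+	ID   int64
+	Name string
+}
+
+rows, err := conn.Query(ctx, "select id, name from widgets")
+if err != nil {
+	return err
+}
+
+// CollectRows closes rows and returns any row iteration error.
+widgets, err := pgx.CollectRows(rows, pgx.RowToStructByName[Widget])
+if err != nil {
+	return err
+}
+fmt.Println(len(widgets))
+```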
+
+## Tx Helpers
+
+Rather than every type that implemented `Begin` or `BeginTx` methods also needing to implement `BeginFunc` and
+`BeginTxFunc`, these methods have been converted to functions that take a db that implements `Begin` or `BeginTx`.
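+
+A sketch of the function form (assuming an open `*pgx.Conn` in `conn` and a `context.Context` in `ctx`; the `accounts`
+table is illustrative):
+
+```go
+// BeginFunc commits if the callback returns nil and rolls back otherwise.
+err := pgx.BeginFunc(ctx, conn, func(tx pgx.Tx) error {
+	if _, err := tx.Exec(ctx, "update accounts set balance = balance - $1 where id = $2", 10, 1); err != nil {
+		return err
+	}
+	_, err := tx.Exec(ctx, "update accounts set balance = balance + $1 where id = $2", 10, 2)
+	return err
+})
+if err != nil {
+	return err
+}
+```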
+
+## Improved Batch Query Ergonomics
+
+Previously, the code for building a batch went in one place before the call to `SendBatch`, and the code for reading the
+results went in one place after the call to `SendBatch`. This could make it difficult to match up the query and the code
+to handle the results. Now `Queue` returns a `QueuedQuery` which has methods `Query`, `QueryRow`, and `Exec` which can
+be used to register a callback function that will handle the result. Callback functions are called automatically when
+`BatchResults.Close` is called.
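+
+For illustration (assuming an open `*pgx.Conn` in `conn`, a `context.Context` in `ctx`, the `fmt`,
+`github.com/jackc/pgx/v5`, and `github.com/jackc/pgx/v5/pgconn` imports, and an illustrative `widgets` table):
+
+```go
+batch := &pgx.Batch{}
+
+batch.Queue("select name from widgets where id = $1", 1).QueryRow(func(row pgx.Row) error {
+	var name string
+	return row.Scan(&name)
+})
+
+batch.Queue("update widgets set weight = weight + 1").Exec(func(ct pgconn.CommandTag) error {
+	fmt.Println(ct.RowsAffected())
+	return nil
+})
+
+// The registered callbacks run when the batch results are closed.
+err := conn.SendBatch(ctx, batch).Close()
+if err != nil {
+	return err
+}
+```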
+
+## SendBatch Uses Pipeline Mode When Appropriate
+
+Previously, a batch with 10 unique parameterized statements executed 100 times would entail 11 network round trips. 1
+for each prepare / describe and 1 for executing them all. Now pipeline mode is used to prepare / describe all statements
+in a single network round trip. So it would only take 2 round trips.
+
+## Tracing and Logging
+
+Internal logging support has been replaced with tracing hooks. This allows custom tracing integration with tools like OpenTelemetry. Package tracelog provides an adapter for pgx v4 loggers to act as a tracer.
+
+All integrations with 3rd party loggers have been extracted to separate repositories. This trims the pgx dependency
+tree.
diff --git a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
new file mode 100644
index 0000000..c975a93
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributing
+
+## Discuss Significant Changes
+
+Before you invest a significant amount of time on a change, please create a discussion or issue describing your
+proposal. This will help to ensure your proposed change has a reasonable chance of being merged.
+
+## Avoid Dependencies
+
+Adding a dependency is a big deal. While on occasion a new dependency may be accepted, the default answer to any change
+that adds a dependency is no.
+
+## Development Environment Setup
+
+pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE`
+environment variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or key-value pairs. In addition,
+the standard `PG*` environment variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to
+simplify environment variable handling.
+
+### Using an Existing PostgreSQL Cluster
+
+If you already have a PostgreSQL development server this is the quickest way to start and run the majority of the pgx
+test suite. Some tests will be skipped that require server configuration changes (e.g. those testing different
+authentication methods).
+
+Create and set up a test database:
+
+```
+export PGDATABASE=pgx_test
+createdb
+psql -c 'create extension hstore;'
+psql -c 'create extension ltree;'
+psql -c 'create domain uint64 as numeric(20,0);'
+```
+
+Ensure a `postgres` user exists. This happens by default in normal PostgreSQL installs, but some installation methods
+such as Homebrew do not.
+
+```
+createuser -s postgres
+```
+
+Ensure your `PGX_TEST_DATABASE` environment variable points to the database you just created and run the tests.
+
+```
+export PGX_TEST_DATABASE="host=/private/tmp database=pgx_test"
+go test ./...
+```
+
+This will run the vast majority of the tests, but some tests will be skipped (e.g. those testing different connection methods).
+
+### Creating a New PostgreSQL Cluster Exclusively for Testing
+
+The following environment variables need to be set both for initial setup and whenever the tests are run. (direnv is
+highly recommended). Depending on your platform, you may need to change the host for `PGX_TEST_UNIX_SOCKET_CONN_STRING`.
+
+```
+export PGPORT=5015
+export PGUSER=postgres
+export PGDATABASE=pgx_test
+export POSTGRESQL_DATA_DIR=postgresql
+
+export PGX_TEST_DATABASE="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_UNIX_SOCKET_CONN_STRING="host=/private/tmp database=pgx_test"
+export PGX_TEST_TCP_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_SCRAM_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_scram password=secret database=pgx_test"
+export PGX_TEST_MD5_PASSWORD_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_PLAIN_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_pw password=secret"
+export PGX_TEST_TLS_CONN_STRING="host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem"
+export PGX_SSL_PASSWORD=certpw
+export PGX_TEST_TLS_CLIENT_CONN_STRING="host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem database=pgx_test sslcert=`pwd`/.testdb/pgx_sslcert.crt sslkey=`pwd`/.testdb/pgx_sslcert.key"
+```
+
+Create a new database cluster.
+
+```
+initdb --locale=en_US -E UTF-8 --username=postgres .testdb/$POSTGRESQL_DATA_DIR
+
+echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf
+
+cd .testdb
+
+# Generate CA, server, and encrypted client certificates.
+go run ../testsetup/generate_certs.go
+
+# Copy certificates to server directory and set permissions.
+cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
+cp localhost.key $POSTGRESQL_DATA_DIR/server.key
+chmod 600 $POSTGRESQL_DATA_DIR/server.key
+cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt
+
+cd ..
+```
+
+
+Start the new cluster. This will be necessary whenever you are running pgx tests.
+
+```
+postgres -D .testdb/$POSTGRESQL_DATA_DIR
+```
+
+Set up the test database in the new cluster.
+
+```
+createdb
+psql --no-psqlrc -f testsetup/postgresql_setup.sql
+```
+
+### PgBouncer
+
+There are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
+
+### Optional Tests
+
+pgx supports multiple connection types and means of authentication. These tests are optional. They will only run if the
+appropriate environment variables are set. In addition, there may be tests specific to particular PostgreSQL versions,
+non-PostgreSQL servers (e.g. CockroachDB), or connection poolers (e.g. PgBouncer). Run `go test ./... -v | grep SKIP` to see
+if any tests are being skipped.
diff --git a/vendor/github.com/jackc/pgx/v5/LICENSE b/vendor/github.com/jackc/pgx/v5/LICENSE
new file mode 100644
index 0000000..5c486c3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013-2021 Jack Christensen
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jackc/pgx/v5/README.md b/vendor/github.com/jackc/pgx/v5/README.md
new file mode 100644
index 0000000..0138c2c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/README.md
@@ -0,0 +1,186 @@
+[Go Reference](https://pkg.go.dev/github.com/jackc/pgx/v5)
+[CI Build Status](https://github.com/jackc/pgx/actions/workflows/ci.yml)
+
+# pgx - PostgreSQL Driver and Toolkit
+
+pgx is a pure Go driver and toolkit for PostgreSQL.
+
+The pgx driver is a low-level, high performance interface that exposes PostgreSQL-specific features such as `LISTEN` /
+`NOTIFY` and `COPY`. It also includes an adapter for the standard `database/sql` interface.
+
+The toolkit component is a related set of packages that implement PostgreSQL functionality such as parsing the wire protocol
+and type mapping between PostgreSQL and Go. These underlying packages can be used to implement alternative drivers,
+proxies, load balancers, logical replication clients, etc.
+
+## Example Usage
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/jackc/pgx/v5"
+)
+
+func main() {
+ // urlExample := "postgres://username:password@localhost:5432/database_name"
+ conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
+ os.Exit(1)
+ }
+ defer conn.Close(context.Background())
+
+ var name string
+ var weight int64
+ err = conn.QueryRow(context.Background(), "select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err)
+ os.Exit(1)
+ }
+
+ fmt.Println(name, weight)
+}
+```
+
+See the [getting started guide](https://github.com/jackc/pgx/wiki/Getting-started-with-pgx) for more information.
+
+## Features
+
+* Support for approximately 70 different PostgreSQL types
+* Automatic statement preparation and caching
+* Batch queries
+* Single-round trip query mode
+* Full TLS connection control
+* Binary format support for custom types (allows for much quicker encoding/decoding)
+* `COPY` protocol support for faster bulk data loads
+* Tracing and logging support
+* Connection pool with after-connect hook for arbitrary connection setup
+* `LISTEN` / `NOTIFY`
+* Conversion of PostgreSQL arrays to Go slice mappings for integers, floats, and strings
+* `hstore` support
+* `json` and `jsonb` support
+* Maps `inet` and `cidr` PostgreSQL types to `netip.Addr` and `netip.Prefix`
+* Large object support
+* NULL mapping to pointer to pointer
+* Supports `database/sql.Scanner` and `database/sql/driver.Valuer` interfaces for custom types
+* Notice response handling
+* Simulated nested transactions with savepoints
+
+## Choosing Between the pgx and database/sql Interfaces
+
+The pgx interface is faster. Many PostgreSQL specific features such as `LISTEN` / `NOTIFY` and `COPY` are not available
+through the `database/sql` interface.
+
+The pgx interface is recommended when:
+
+1. The application only targets PostgreSQL.
+2. No other libraries that require `database/sql` are in use.
+
+It is also possible to use the `database/sql` interface and convert a connection to the lower-level pgx interface as needed.
+
+## Testing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions.
+
+## Architecture
+
+See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
+
+## Supported Go and PostgreSQL Versions
+
+pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.23 and higher and PostgreSQL 13 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
+
+## Version Policy
+
+pgx follows semantic versioning for the documented public API on stable releases. `v5` is the latest stable major version.
+
+## PGX Family Libraries
+
+### [github.com/jackc/pglogrepl](https://github.com/jackc/pglogrepl)
+
+pglogrepl provides functionality to act as a client for PostgreSQL logical replication.
+
+### [github.com/jackc/pgmock](https://github.com/jackc/pgmock)
+
+pgmock offers the ability to create a server that mocks the PostgreSQL wire protocol. This is used internally to test pgx by purposely inducing unusual errors. pgproto3 and pgmock together provide most of the foundational tooling required to implement a PostgreSQL proxy or MitM (such as for a custom connection pooler).
+
+### [github.com/jackc/tern](https://github.com/jackc/tern)
+
+tern is a stand-alone SQL migration system.
+
+### [github.com/jackc/pgerrcode](https://github.com/jackc/pgerrcode)
+
+pgerrcode contains constants for the PostgreSQL error codes.
+
+## Adapters for 3rd Party Types
+
+* [github.com/jackc/pgx-gofrs-uuid](https://github.com/jackc/pgx-gofrs-uuid)
+* [github.com/jackc/pgx-shopspring-decimal](https://github.com/jackc/pgx-shopspring-decimal)
+* [github.com/twpayne/pgx-geos](https://github.com/twpayne/pgx-geos) ([PostGIS](https://postgis.net/) and [GEOS](https://libgeos.org/) via [go-geos](https://github.com/twpayne/go-geos))
+* [github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
+
+
+## Adapters for 3rd Party Tracers
+
+* [github.com/jackhopner/pgx-xray-tracer](https://github.com/jackhopner/pgx-xray-tracer)
+
+## Adapters for 3rd Party Loggers
+
+These adapters can be used with the tracelog package.
+
+* [github.com/jackc/pgx-go-kit-log](https://github.com/jackc/pgx-go-kit-log)
+* [github.com/jackc/pgx-log15](https://github.com/jackc/pgx-log15)
+* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
+* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
+* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
+* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
+* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)
+
+## 3rd Party Libraries with PGX Support
+
+### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)
+
+pgxmock is a mock library implementing pgx interfaces.
+pgxmock has one and only one purpose: to simulate pgx behavior in tests, without needing a real database connection.
+
+### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
+
+Library for scanning data from a database into Go structs and more.
+
+### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)
+
+A carefully designed SQL client that makes using SQL easier,
+more productive, and less error-prone in Go.
+
+### [github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
+
+Adds GSSAPI / Kerberos authentication support.
+
+### [github.com/wcamarao/pmx](https://github.com/wcamarao/pmx)
+
+Explicit data mapping and scanning library for Go structs and slices.
+
+### [github.com/stephenafamo/scan](https://github.com/stephenafamo/scan)
+
+Type safe and flexible package for scanning database data into Go types.
+Supports structs, maps, slices, and custom mapping functions.
+
+### [github.com/z0ne-dev/mgx](https://github.com/z0ne-dev/mgx)
+
+Code first migration library for native pgx (no database/sql abstraction).
+
+### [github.com/amirsalarsafaei/sqlc-pgx-monitoring](https://github.com/amirsalarsafaei/sqlc-pgx-monitoring)
+
+A database monitoring/metrics library for pgx and sqlc. Trace, log and monitor your sqlc query performance using OpenTelemetry.
+
+### [github.com/nikolayk812/pgx-outbox](https://github.com/nikolayk812/pgx-outbox)
+
+A simple Go implementation of the transactional outbox pattern for PostgreSQL using the jackc/pgx driver.
+
+### [github.com/Arlandaren/pgxWrappy](https://github.com/Arlandaren/pgxWrappy)
+
+Simplifies working with the pgx library, providing convenient scanning of nested structures.
diff --git a/vendor/github.com/jackc/pgx/v5/Rakefile b/vendor/github.com/jackc/pgx/v5/Rakefile
new file mode 100644
index 0000000..3e3aa50
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/Rakefile
@@ -0,0 +1,18 @@
+require "erb"
+
+rule '.go' => '.go.erb' do |task|
+ erb = ERB.new(File.read(task.source))
+ File.write(task.name, "// Code generated from #{task.source}. DO NOT EDIT.\n\n" + erb.result(binding))
+ sh "goimports", "-w", task.name
+end
+
+generated_code_files = [
+ "pgtype/int.go",
+ "pgtype/int_test.go",
+ "pgtype/integration_benchmark_test.go",
+ "pgtype/zeronull/int.go",
+ "pgtype/zeronull/int_test.go"
+]
+
+desc "Generate code"
+task generate: generated_code_files
diff --git a/vendor/github.com/jackc/pgx/v5/batch.go b/vendor/github.com/jackc/pgx/v5/batch.go
new file mode 100644
index 0000000..c3c2834
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/batch.go
@@ -0,0 +1,443 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// QueuedQuery is a query that has been queued for execution via a Batch.
+type QueuedQuery struct {
+ SQL string
+ Arguments []any
+ Fn batchItemFunc
+ sd *pgconn.StatementDescription
+}
+
+type batchItemFunc func(br BatchResults) error
+
+// Query sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
+ qq.Fn = func(br BatchResults) error {
+ rows, _ := br.Query()
+ defer rows.Close()
+
+ err := fn(rows)
+ if err != nil {
+ return err
+ }
+ rows.Close()
+
+ return rows.Err()
+ }
+}
+
+// QueryRow sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
+ qq.Fn = func(br BatchResults) error {
+ row := br.QueryRow()
+ return fn(row)
+ }
+}
+
+// Exec sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
+ qq.Fn = func(br BatchResults) error {
+ ct, err := br.Exec()
+ if err != nil {
+ return err
+ }
+
+ return fn(ct)
+ }
+}
+
+// Batch queries are a way of bundling multiple queries together to avoid
+// unnecessary network round trips. A Batch must only be sent once.
+type Batch struct {
+ QueuedQueries []*QueuedQuery
+}
+
+// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
+// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
+//
+// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryExecModeSimpleProtocol, this should
+// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
+// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
+// include the current query may reference the wrong query.
+func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
+ qq := &QueuedQuery{
+ SQL: query,
+ Arguments: arguments,
+ }
+ b.QueuedQueries = append(b.QueuedQueries, qq)
+ return qq
+}
+
+// Len returns number of queries that have been queued so far.
+func (b *Batch) Len() int {
+ return len(b.QueuedQueries)
+}
+
+type BatchResults interface {
+ // Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
+ // calling Exec on the QueuedQuery.
+ Exec() (pgconn.CommandTag, error)
+
+ // Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
+ // calling Query on the QueuedQuery.
+ Query() (Rows, error)
+
+ // QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
+ // Prefer calling QueryRow on the QueuedQuery.
+ QueryRow() Row
+
+ // Close closes the batch operation. All unread results are read and any callback functions registered with
+ // QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
+ // error or the batch encounters an error subsequent callback functions will not be called.
+ //
+ // Close must be called before the underlying connection can be used again. Any error that occurred during a batch
+	// operation may have made it impossible to resynchronize the connection with the server. In this case the underlying
+ // connection will have been closed.
+ //
+ // Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
+ // functions will not be rerun.
+ Close() error
+}
+
+type batchResults struct {
+ ctx context.Context
+ conn *Conn
+ mrr *pgconn.MultiResultReader
+ err error
+ b *Batch
+ qqIdx int
+ closed bool
+ endTraced bool
+}
+
+// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
+func (br *batchResults) Exec() (pgconn.CommandTag, error) {
+ if br.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+ if br.closed {
+ return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
+ }
+
+ query, arguments, _ := br.nextQueryAndArgs()
+
+ if !br.mrr.NextResult() {
+ err := br.mrr.Close()
+ if err == nil {
+ err = errors.New("no more results in batch")
+ }
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: err,
+ })
+ }
+ return pgconn.CommandTag{}, err
+ }
+
+ commandTag, err := br.mrr.ResultReader().Close()
+ if err != nil {
+ br.err = err
+ br.mrr.Close()
+ }
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ CommandTag: commandTag,
+ Err: br.err,
+ })
+ }
+
+ return commandTag, br.err
+}
+
+// Query reads the results from the next query in the batch as if the query has been sent with Query.
+func (br *batchResults) Query() (Rows, error) {
+ query, arguments, ok := br.nextQueryAndArgs()
+ if !ok {
+ query = "batch query"
+ }
+
+ if br.err != nil {
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ if br.closed {
+ alreadyClosedErr := fmt.Errorf("batch already closed")
+ return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
+ }
+
+ rows := br.conn.getRows(br.ctx, query, arguments)
+ rows.batchTracer = br.conn.batchTracer
+
+ if !br.mrr.NextResult() {
+ rows.err = br.mrr.Close()
+ if rows.err == nil {
+ rows.err = errors.New("no more results in batch")
+ }
+ rows.closed = true
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: rows.err,
+ })
+ }
+
+ return rows, rows.err
+ }
+
+ rows.resultReader = br.mrr.ResultReader()
+ return rows, nil
+}
+
+// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
+func (br *batchResults) QueryRow() Row {
+ rows, _ := br.Query()
+ return (*connRow)(rows.(*baseRows))
+
+}
+
+// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
+// resynchronize the connection with the server. In this case the underlying connection will have been closed.
+func (br *batchResults) Close() error {
+ defer func() {
+ if !br.endTraced {
+ if br.conn != nil && br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
+ }
+ br.endTraced = true
+ }
+ }()
+
+ if br.err != nil {
+ return br.err
+ }
+
+ if br.closed {
+ return nil
+ }
+
+ // Read and run fn for all remaining items
+ for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ if br.b.QueuedQueries[br.qqIdx].Fn != nil {
+ err := br.b.QueuedQueries[br.qqIdx].Fn(br)
+ if err != nil {
+ br.err = err
+ }
+ } else {
+ br.Exec()
+ }
+ }
+
+ br.closed = true
+
+ err := br.mrr.Close()
+ if br.err == nil {
+ br.err = err
+ }
+
+ return br.err
+}
+
+func (br *batchResults) earlyError() error {
+ return br.err
+}
+
+func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
+ if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ bi := br.b.QueuedQueries[br.qqIdx]
+ query = bi.SQL
+ args = bi.Arguments
+ ok = true
+ br.qqIdx++
+ }
+ return
+}
+
+type pipelineBatchResults struct {
+ ctx context.Context
+ conn *Conn
+ pipeline *pgconn.Pipeline
+ lastRows *baseRows
+ err error
+ b *Batch
+ qqIdx int
+ closed bool
+ endTraced bool
+}
+
+// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
+func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
+ if br.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+ if br.closed {
+ return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
+ }
+ if br.lastRows != nil && br.lastRows.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+
+ query, arguments, err := br.nextQueryAndArgs()
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ results, err := br.pipeline.GetResults()
+ if err != nil {
+ br.err = err
+ return pgconn.CommandTag{}, br.err
+ }
+ var commandTag pgconn.CommandTag
+ switch results := results.(type) {
+ case *pgconn.ResultReader:
+ commandTag, br.err = results.Close()
+ default:
+ return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", results)
+ }
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ CommandTag: commandTag,
+ Err: br.err,
+ })
+ }
+
+ return commandTag, br.err
+}
+
+// Query reads the results from the next query in the batch as if the query has been sent with Query.
+func (br *pipelineBatchResults) Query() (Rows, error) {
+ if br.err != nil {
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ if br.closed {
+ alreadyClosedErr := fmt.Errorf("batch already closed")
+ return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
+ }
+
+ if br.lastRows != nil && br.lastRows.err != nil {
+ br.err = br.lastRows.err
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ query, arguments, err := br.nextQueryAndArgs()
+ if err != nil {
+ return &baseRows{err: err, closed: true}, err
+ }
+
+ rows := br.conn.getRows(br.ctx, query, arguments)
+ rows.batchTracer = br.conn.batchTracer
+ br.lastRows = rows
+
+ results, err := br.pipeline.GetResults()
+ if err != nil {
+ br.err = err
+ rows.err = err
+ rows.closed = true
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: err,
+ })
+ }
+ } else {
+ switch results := results.(type) {
+ case *pgconn.ResultReader:
+ rows.resultReader = results
+ default:
+ err = fmt.Errorf("unexpected pipeline result: %T", results)
+ br.err = err
+ rows.err = err
+ rows.closed = true
+ }
+ }
+
+ return rows, rows.err
+}
+
+// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
+func (br *pipelineBatchResults) QueryRow() Row {
+ rows, _ := br.Query()
+ return (*connRow)(rows.(*baseRows))
+
+}
+
+// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
+// resynchronize the connection with the server. In this case the underlying connection will have been closed.
+func (br *pipelineBatchResults) Close() error {
+ defer func() {
+ if !br.endTraced {
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
+ }
+ br.endTraced = true
+ }
+ }()
+
+ if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
+ br.err = br.lastRows.err
+ return br.err
+ }
+
+ if br.closed {
+ return br.err
+ }
+
+ // Read and run fn for all remaining items
+ for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ if br.b.QueuedQueries[br.qqIdx].Fn != nil {
+ err := br.b.QueuedQueries[br.qqIdx].Fn(br)
+ if err != nil {
+ br.err = err
+ }
+ } else {
+ br.Exec()
+ }
+ }
+
+ br.closed = true
+
+ err := br.pipeline.Close()
+ if br.err == nil {
+ br.err = err
+ }
+
+ return br.err
+}
+
+func (br *pipelineBatchResults) earlyError() error {
+ return br.err
+}
+
+func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
+ if br.b == nil {
+ return "", nil, errors.New("no reference to batch")
+ }
+
+ if br.qqIdx >= len(br.b.QueuedQueries) {
+ return "", nil, errors.New("no more results in batch")
+ }
+
+ bi := br.b.QueuedQueries[br.qqIdx]
+ br.qqIdx++
+ return bi.SQL, bi.Arguments, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/conn.go b/vendor/github.com/jackc/pgx/v5/conn.go
new file mode 100644
index 0000000..93e2e71
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/conn.go
@@ -0,0 +1,1437 @@
+package pgx
+
+import (
+ "context"
+ "crypto/sha256"
+ "database/sql"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/sanitize"
+ "github.com/jackc/pgx/v5/internal/stmtcache"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// ConnConfig contains all the options used to establish a connection. It must be created by ParseConfig and
+// then it can be modified. A manually initialized ConnConfig will cause ConnectConfig to panic.
+type ConnConfig struct {
+ pgconn.Config
+
+ Tracer QueryTracer
+
+ // Original connection string that was parsed into config.
+ connString string
+
+ // StatementCacheCapacity is maximum size of the statement cache used when executing a query with "cache_statement"
+ // query exec mode.
+ StatementCacheCapacity int
+
+ // DescriptionCacheCapacity is the maximum size of the description cache used when executing a query with
+ // "cache_describe" query exec mode.
+ DescriptionCacheCapacity int
+
+ // DefaultQueryExecMode controls the default mode for executing queries. By default pgx uses the extended protocol
+ // and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as
+ // PGBouncer. In this case it may be preferable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
+ // functionality can be controlled on a per query basis by passing a QueryExecMode as the first query argument.
+ DefaultQueryExecMode QueryExecMode
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
+type ParseConfigOptions struct {
+ pgconn.ParseConfigOptions
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the tls.Config:
+// according to the tls.Config docs it must not be modified after creation.
+func (cc *ConnConfig) Copy() *ConnConfig {
+ newConfig := new(ConnConfig)
+ *newConfig = *cc
+ newConfig.Config = *newConfig.Config.Copy()
+ return newConfig
+}
+
+// ConnString returns the connection string as parsed by pgx.ParseConfig into pgx.ConnConfig.
+func (cc *ConnConfig) ConnString() string { return cc.connString }
+
+// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage. Use a connection pool to manage access
+// to multiple database connections from multiple goroutines.
+type Conn struct {
+ pgConn *pgconn.PgConn
+ config *ConnConfig // config used when establishing this connection
+ preparedStatements map[string]*pgconn.StatementDescription
+ statementCache stmtcache.Cache
+ descriptionCache stmtcache.Cache
+
+ queryTracer QueryTracer
+ batchTracer BatchTracer
+ copyFromTracer CopyFromTracer
+ prepareTracer PrepareTracer
+
+ notifications []*pgconn.Notification
+
+ doneChan chan struct{}
+ closedChan chan error
+
+ typeMap *pgtype.Map
+
+ wbuf []byte
+ eqb ExtendedQueryBuilder
+}
+
+// Identifier is a PostgreSQL identifier or name. Identifiers can be composed of
+// multiple parts such as ["schema", "table"] or ["table", "column"].
+type Identifier []string
+
+// Sanitize returns a sanitized string safe for SQL interpolation.
+func (ident Identifier) Sanitize() string {
+ parts := make([]string, len(ident))
+ for i := range ident {
+ s := strings.ReplaceAll(ident[i], string([]byte{0}), "")
+ parts[i] = `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
+ }
+ return strings.Join(parts, ".")
+}
+
+var (
+ // ErrNoRows occurs when rows are expected but none are returned.
+ ErrNoRows = newProxyErr(sql.ErrNoRows, "no rows in result set")
+ // ErrTooManyRows occurs when more rows than expected are returned.
+ ErrTooManyRows = errors.New("too many rows in result set")
+)
+
+func newProxyErr(background error, msg string) error {
+ return &proxyError{
+ msg: msg,
+ background: background,
+ }
+}
+
+type proxyError struct {
+ msg string
+ background error
+}
+
+func (err *proxyError) Error() string { return err.msg }
+
+func (err *proxyError) Unwrap() error { return err.background }
+
+var (
+ errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
+ errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+)
+
+// Connect establishes a connection with a PostgreSQL server with a connection string. See
+// pgconn.Connect for details.
+func Connect(ctx context.Context, connString string) (*Conn, error) {
+ connConfig, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+ return connect(ctx, connConfig)
+}
+
+// ConnectWithOptions behaves exactly like Connect with the addition of options. At present, options is only used to
+// provide a GetSSLPassword function.
+func ConnectWithOptions(ctx context.Context, connString string, options ParseConfigOptions) (*Conn, error) {
+ connConfig, err := ParseConfigWithOptions(connString, options)
+ if err != nil {
+ return nil, err
+ }
+ return connect(ctx, connConfig)
+}
+
+// ConnectConfig establishes a connection with a PostgreSQL server with a configuration struct.
+// connConfig must have been created by ParseConfig.
+func ConnectConfig(ctx context.Context, connConfig *ConnConfig) (*Conn, error) {
+ // In general this improves safety. In particular avoid the config.Config.OnNotification mutation from affecting other
+ // connections with the same config. See https://github.com/jackc/pgx/issues/618.
+ connConfig = connConfig.Copy()
+
+ return connect(ctx, connConfig)
+}
+
+// ParseConfigWithOptions behaves exactly as ParseConfig does with the addition of options. At present, options is
+// only used to provide a GetSSLPassword function.
+func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*ConnConfig, error) {
+ config, err := pgconn.ParseConfigWithOptions(connString, options.ParseConfigOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ statementCacheCapacity := 512
+ if s, ok := config.RuntimeParams["statement_cache_capacity"]; ok {
+ delete(config.RuntimeParams, "statement_cache_capacity")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse statement_cache_capacity: %w", err)
+ }
+ statementCacheCapacity = int(n)
+ }
+
+ descriptionCacheCapacity := 512
+ if s, ok := config.RuntimeParams["description_cache_capacity"]; ok {
+ delete(config.RuntimeParams, "description_cache_capacity")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse description_cache_capacity: %w", err)
+ }
+ descriptionCacheCapacity = int(n)
+ }
+
+ defaultQueryExecMode := QueryExecModeCacheStatement
+ if s, ok := config.RuntimeParams["default_query_exec_mode"]; ok {
+ delete(config.RuntimeParams, "default_query_exec_mode")
+ switch s {
+ case "cache_statement":
+ defaultQueryExecMode = QueryExecModeCacheStatement
+ case "cache_describe":
+ defaultQueryExecMode = QueryExecModeCacheDescribe
+ case "describe_exec":
+ defaultQueryExecMode = QueryExecModeDescribeExec
+ case "exec":
+ defaultQueryExecMode = QueryExecModeExec
+ case "simple_protocol":
+ defaultQueryExecMode = QueryExecModeSimpleProtocol
+ default:
+ return nil, fmt.Errorf("invalid default_query_exec_mode: %s", s)
+ }
+ }
+
+ connConfig := &ConnConfig{
+ Config: *config,
+ createdByParseConfig: true,
+ StatementCacheCapacity: statementCacheCapacity,
+ DescriptionCacheCapacity: descriptionCacheCapacity,
+ DefaultQueryExecMode: defaultQueryExecMode,
+ connString: connString,
+ }
+
+ return connConfig, nil
+}
+
+// ParseConfig creates a ConnConfig from a connection string. ParseConfig handles all options that [pgconn.ParseConfig]
+// does. In addition, it accepts the following options:
+//
+// - default_query_exec_mode.
+// Possible values: "cache_statement", "cache_describe", "describe_exec", "exec", and "simple_protocol". See
+// QueryExecMode constant documentation for the meaning of these values. Default: "cache_statement".
+//
+// - statement_cache_capacity.
+// The maximum size of the statement cache used when executing a query with "cache_statement" query exec mode.
+// Default: 512.
+//
+// - description_cache_capacity.
+// The maximum size of the description cache used when executing a query with "cache_describe" query exec mode.
+// Default: 512.
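+//
+// For example, a connection string using these options might look like:
+//
+//	postgres://user:password@localhost:5432/mydb?default_query_exec_mode=cache_describe&statement_cache_capacity=256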
+func ParseConfig(connString string) (*ConnConfig, error) {
+ return ParseConfigWithOptions(connString, ParseConfigOptions{})
+}
+
+// connect connects to a database. connect takes ownership of config. The caller must not use or access it again.
+func connect(ctx context.Context, config *ConnConfig) (c *Conn, err error) {
+ if connectTracer, ok := config.Tracer.(ConnectTracer); ok {
+ ctx = connectTracer.TraceConnectStart(ctx, TraceConnectStartData{ConnConfig: config})
+ defer func() {
+ connectTracer.TraceConnectEnd(ctx, TraceConnectEndData{Conn: c, Err: err})
+ }()
+ }
+
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ c = &Conn{
+ config: config,
+ typeMap: pgtype.NewMap(),
+ queryTracer: config.Tracer,
+ }
+
+ if t, ok := c.queryTracer.(BatchTracer); ok {
+ c.batchTracer = t
+ }
+ if t, ok := c.queryTracer.(CopyFromTracer); ok {
+ c.copyFromTracer = t
+ }
+ if t, ok := c.queryTracer.(PrepareTracer); ok {
+ c.prepareTracer = t
+ }
+
+ // Only install pgx notification system if no other callback handler is present.
+ if config.Config.OnNotification == nil {
+ config.Config.OnNotification = c.bufferNotifications
+ }
+
+ c.pgConn, err = pgconn.ConnectConfig(ctx, &config.Config)
+ if err != nil {
+ return nil, err
+ }
+
+ c.preparedStatements = make(map[string]*pgconn.StatementDescription)
+ c.doneChan = make(chan struct{})
+ c.closedChan = make(chan error)
+ c.wbuf = make([]byte, 0, 1024)
+
+ if c.config.StatementCacheCapacity > 0 {
+ c.statementCache = stmtcache.NewLRUCache(c.config.StatementCacheCapacity)
+ }
+
+ if c.config.DescriptionCacheCapacity > 0 {
+ c.descriptionCache = stmtcache.NewLRUCache(c.config.DescriptionCacheCapacity)
+ }
+
+ return c, nil
+}
+
+// Close closes a connection. It is safe to call Close on an already closed
+// connection.
+func (c *Conn) Close(ctx context.Context) error {
+ if c.IsClosed() {
+ return nil
+ }
+
+ err := c.pgConn.Close(ctx)
+ return err
+}
+
+// Prepare creates a prepared statement with name and sql. sql can contain placeholders for bound parameters. These
+// placeholders are referenced positionally as $1, $2, etc. name can be used instead of sql with Query, QueryRow, and
+// Exec to execute the statement. It can also be used with Batch.Queue.
+//
+// The underlying PostgreSQL identifier for the prepared statement will be name if name != sql or a digest of sql if
+// name == sql.
+//
+// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same name and sql arguments. This
+// allows a code path to Prepare and Query/Exec without concern for whether the statement has already been prepared.
+func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error) {
+ if c.prepareTracer != nil {
+ ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sql})
+ }
+
+ if name != "" {
+ var ok bool
+ if sd, ok = c.preparedStatements[name]; ok && sd.SQL == sql {
+ if c.prepareTracer != nil {
+ c.prepareTracer.TracePrepareEnd(ctx, c, TracePrepareEndData{AlreadyPrepared: true})
+ }
+ return sd, nil
+ }
+ }
+
+ if c.prepareTracer != nil {
+ defer func() {
+ c.prepareTracer.TracePrepareEnd(ctx, c, TracePrepareEndData{Err: err})
+ }()
+ }
+
+ var psName, psKey string
+ if name == sql {
+ digest := sha256.Sum256([]byte(sql))
+ psName = "stmt_" + hex.EncodeToString(digest[0:24])
+ psKey = sql
+ } else {
+ psName = name
+ psKey = name
+ }
+
+ sd, err = c.pgConn.Prepare(ctx, psName, sql, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if psKey != "" {
+ c.preparedStatements[psKey] = sd
+ }
+
+ return sd, nil
+}
+
+// Deallocate releases a prepared statement. Calling Deallocate on a non-existent prepared statement will succeed.
+func (c *Conn) Deallocate(ctx context.Context, name string) error {
+ var psName string
+ sd := c.preparedStatements[name]
+ if sd != nil {
+ psName = sd.Name
+ } else {
+ psName = name
+ }
+
+ err := c.pgConn.Deallocate(ctx, psName)
+ if err != nil {
+ return err
+ }
+
+ if sd != nil {
+ delete(c.preparedStatements, name)
+ }
+
+ return nil
+}
+
+// DeallocateAll releases all previously prepared statements from the server and client. It also resets the statement
+// and description caches.
+func (c *Conn) DeallocateAll(ctx context.Context) error {
+ c.preparedStatements = map[string]*pgconn.StatementDescription{}
+ if c.config.StatementCacheCapacity > 0 {
+ c.statementCache = stmtcache.NewLRUCache(c.config.StatementCacheCapacity)
+ }
+ if c.config.DescriptionCacheCapacity > 0 {
+ c.descriptionCache = stmtcache.NewLRUCache(c.config.DescriptionCacheCapacity)
+ }
+ _, err := c.pgConn.Exec(ctx, "deallocate all").ReadAll()
+ return err
+}
+
+func (c *Conn) bufferNotifications(_ *pgconn.PgConn, n *pgconn.Notification) {
+ c.notifications = append(c.notifications, n)
+}
+
+// WaitForNotification waits for a PostgreSQL notification. It wraps the underlying pgconn notification system in a
+// slightly more convenient form.
+func (c *Conn) WaitForNotification(ctx context.Context) (*pgconn.Notification, error) {
+ var n *pgconn.Notification
+
+ // Return already received notification immediately
+ if len(c.notifications) > 0 {
+ n = c.notifications[0]
+ c.notifications = c.notifications[1:]
+ return n, nil
+ }
+
+ err := c.pgConn.WaitForNotification(ctx)
+ if len(c.notifications) > 0 {
+ n = c.notifications[0]
+ c.notifications = c.notifications[1:]
+ }
+ return n, err
+}
+
+// IsClosed reports if the connection has been closed.
+func (c *Conn) IsClosed() bool {
+ return c.pgConn.IsClosed()
+}
+
+func (c *Conn) die() {
+ if c.IsClosed() {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // force immediate hard cancel
+ c.pgConn.Close(ctx)
+}
+
+func quoteIdentifier(s string) string {
+ return `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
+}
+
+// Ping delegates to the underlying *pgconn.PgConn.Ping.
+func (c *Conn) Ping(ctx context.Context) error {
+ return c.pgConn.Ping(ctx)
+}
+
+// PgConn returns the underlying *pgconn.PgConn. This is an escape hatch method that allows lower level access to the
+// PostgreSQL connection than pgx exposes.
+//
+// It is strongly recommended that the connection be idle (no in-progress queries) before the underlying *pgconn.PgConn
+// is used and the connection must be returned to the same state before any *pgx.Conn methods are again used.
+func (c *Conn) PgConn() *pgconn.PgConn { return c.pgConn }
+
+// TypeMap returns the type map used by this connection.
+func (c *Conn) TypeMap() *pgtype.Map { return c.typeMap }
+
+// Config returns a copy of config that was used to establish this connection.
+func (c *Conn) Config() *ConnConfig { return c.config.Copy() }
+
+// Exec executes sql. sql can be either a prepared statement name or an SQL string. arguments should be referenced
+// positionally from the sql string as $1, $2, etc.
+func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ if c.queryTracer != nil {
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: arguments})
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ commandTag, err := c.exec(ctx, sql, arguments...)
+
+ if c.queryTracer != nil {
+ c.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{CommandTag: commandTag, Err: err})
+ }
+
+ return commandTag, err
+}
+
+func (c *Conn) exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ mode := c.config.DefaultQueryExecMode
+ var queryRewriter QueryRewriter
+
+optionLoop:
+ for len(arguments) > 0 {
+ switch arg := arguments[0].(type) {
+ case QueryExecMode:
+ mode = arg
+ arguments = arguments[1:]
+ case QueryRewriter:
+ queryRewriter = arg
+ arguments = arguments[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, fmt.Errorf("rewrite query failed: %w", err)
+ }
+ }
+
+ // Always use simple protocol when there are no arguments.
+ if len(arguments) == 0 {
+ mode = QueryExecModeSimpleProtocol
+ }
+
+ if sd, ok := c.preparedStatements[sql]; ok {
+ return c.execPrepared(ctx, sd, arguments)
+ }
+
+ switch mode {
+ case QueryExecModeCacheStatement:
+ if c.statementCache == nil {
+ return pgconn.CommandTag{}, errDisabledStatementCache
+ }
+ sd := c.statementCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ c.statementCache.Put(sd)
+ }
+
+ return c.execPrepared(ctx, sd, arguments)
+ case QueryExecModeCacheDescribe:
+ if c.descriptionCache == nil {
+ return pgconn.CommandTag{}, errDisabledDescriptionCache
+ }
+ sd := c.descriptionCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, "", sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ c.descriptionCache.Put(sd)
+ }
+
+ return c.execParams(ctx, sd, arguments)
+ case QueryExecModeDescribeExec:
+ sd, err := c.Prepare(ctx, "", sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ return c.execPrepared(ctx, sd, arguments)
+ case QueryExecModeExec:
+ return c.execSQLParams(ctx, sql, arguments)
+ case QueryExecModeSimpleProtocol:
+ return c.execSimpleProtocol(ctx, sql, arguments)
+ default:
+ return pgconn.CommandTag{}, fmt.Errorf("unknown QueryExecMode: %v", mode)
+ }
+}
+
+func (c *Conn) execSimpleProtocol(ctx context.Context, sql string, arguments []any) (commandTag pgconn.CommandTag, err error) {
+ if len(arguments) > 0 {
+ sql, err = c.sanitizeForSimpleQuery(sql, arguments...)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ }
+
+ mrr := c.pgConn.Exec(ctx, sql)
+ for mrr.NextResult() {
+ commandTag, _ = mrr.ResultReader().Close()
+ }
+ err = mrr.Close()
+ return commandTag, err
+}
+
+func (c *Conn) execParams(ctx context.Context, sd *pgconn.StatementDescription, arguments []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, sd, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecParams(ctx, sd.SQL, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+func (c *Conn) execPrepared(ctx context.Context, sd *pgconn.StatementDescription, arguments []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, sd, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+func (c *Conn) execSQLParams(ctx context.Context, sql string, args []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, nil, args)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+func (c *Conn) getRows(ctx context.Context, sql string, args []any) *baseRows {
+ r := &baseRows{}
+
+ r.ctx = ctx
+ r.queryTracer = c.queryTracer
+ r.typeMap = c.typeMap
+ r.startTime = time.Now()
+ r.sql = sql
+ r.args = args
+ r.conn = c
+
+ return r
+}
+
+type QueryExecMode int32
+
+const (
+ _ QueryExecMode = iota
+
+ // Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single round
+ // trip after the statement is cached. This is the default. If the database schema is modified or the search_path is
+ // changed after a statement is cached then the first execution of a previously cached query may fail. e.g. If the
+ // number of columns returned by a "SELECT *" changes or the type of a column is changed.
+ QueryExecModeCacheStatement
+
+ // Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the extended
+ // protocol. Queries are executed in a single round trip after the description is cached. If the database schema is
+ // modified or the search_path is changed after a statement is cached then the first execution of a previously cached
+ // query may fail. e.g. If the number of columns returned by a "SELECT *" changes or the type of a column is changed.
+ QueryExecModeCacheDescribe
+
+ // Get the statement description on every execution. This uses the extended protocol. Queries require two round trips
+ // to execute. It does not use named prepared statements. But it does use the unnamed prepared statement to get the
+ // statement description on the first round trip and then uses it to execute the query on the second round trip. This
+ // may cause problems with connection poolers that switch the underlying connection between round trips. It is safe
+ // even when the database schema is modified concurrently.
+ QueryExecModeDescribeExec
+
+ // Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
+ // with text formatted parameters and results. Queries are executed in a single round trip. Type mappings can be
+ // registered with pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are
+ // unregistered or ambiguous. e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know
+ // the PostgreSQL type can use a map[string]string directly as an argument. This mode cannot.
+ //
+ // On rare occasions user defined types may behave differently when encoded in the text format instead of the binary
+ // format. For example, this could happen if a "type RomanNumeral int32" implements fmt.Stringer to format integers as
+ // Roman numerals (e.g. 7 is VII). The binary format would properly encode the integer 7 as the binary value for 7.
+ // But the text format would encode the integer 7 as the string "VII". As QueryExecModeExec uses the text format, it
+ // is possible that changing query mode from another mode to QueryExecModeExec could change the behavior of the query.
+ // This should not occur with types pgx supports directly and can be avoided by registering the types with
+ // pgtype.Map.RegisterDefaultPgType and implementing the appropriate type interfaces. In the case of RomanNumeral, it
+ // should implement pgtype.Int64Valuer.
+ QueryExecModeExec
+
+ // Use the simple protocol. Assume the PostgreSQL query parameter types based on the Go type of the arguments. This is
+ // especially significant for []byte values. []byte values are encoded as PostgreSQL bytea. string must be used
+ // instead for text type values including json and jsonb. Type mappings can be registered with
+ // pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are unregistered or ambiguous.
+ // e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know the PostgreSQL type can use a
+ // map[string]string directly as an argument. This mode cannot. Queries are executed in a single round trip.
+ //
+ // QueryExecModeSimpleProtocol should have the same user-visible application behavior as QueryExecModeExec. This includes
+ // the warning regarding differences in text format and binary format encoding with user defined types. There may be
+ // other minor exceptions such as behavior when multiple result returning queries are erroneously sent in a single
+ // string.
+ //
+ // QueryExecModeSimpleProtocol uses client side parameter interpolation. All values are quoted and escaped. Prefer
+ // QueryExecModeExec over QueryExecModeSimpleProtocol whenever possible. In general QueryExecModeSimpleProtocol should
+ // only be used if connecting to a proxy server, connection pool server, or non-PostgreSQL server that does not
+ // support the extended protocol.
+ QueryExecModeSimpleProtocol
+)
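+
+// Illustrative caller-side sketch (not part of the upstream source): a QueryExecMode can be
+// passed as the first argument to Query, QueryRow, or Exec to override the connection's
+// DefaultQueryExecMode for that call. The table name below is hypothetical.
+//
+//	rows, err := conn.Query(ctx, "select id, name from widgets", pgx.QueryExecModeSimpleProtocol)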
+
+func (m QueryExecMode) String() string {
+ switch m {
+ case QueryExecModeCacheStatement:
+ return "cache statement"
+ case QueryExecModeCacheDescribe:
+ return "cache describe"
+ case QueryExecModeDescribeExec:
+ return "describe exec"
+ case QueryExecModeExec:
+ return "exec"
+ case QueryExecModeSimpleProtocol:
+ return "simple protocol"
+ default:
+ return "invalid"
+ }
+}
+
+// QueryResultFormats controls the result format (text=0, binary=1) of a query by result column position.
+type QueryResultFormats []int16
+
+// QueryResultFormatsByOID controls the result format (text=0, binary=1) of a query by the result column OID.
+type QueryResultFormatsByOID map[uint32]int16
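+
+// Illustrative caller-side sketch: forcing the single result column of a query to the binary
+// format by passing QueryResultFormats as the first argument (the table name is hypothetical).
+//
+//	rows, err := conn.Query(ctx, "select payload from blobs", pgx.QueryResultFormats{pgx.BinaryFormatCode})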
+
+// QueryRewriter rewrites a query when used as the first arguments to a query method.
+type QueryRewriter interface {
+ RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error)
+}
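+
+// NamedArgs (defined elsewhere in this package) is the built-in QueryRewriter. A minimal
+// caller-side sketch, assuming a hypothetical "users" table:
+//
+//	rows, err := conn.Query(ctx, "select id, name from users where id = @id", pgx.NamedArgs{"id": 42})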
+
+// Query sends a query to the server and returns a Rows to read the results. Only errors encountered sending the query
+// and initializing Rows will be returned. Err() on the returned Rows must be checked after the Rows is closed to
+// determine if the query executed successfully.
+//
+// The returned Rows must be closed before the connection can be used again. It is safe to attempt to read from the
+ // returned Rows even if an error is returned. The error will be available in rows.Err() after rows are closed. It
+// is allowed to ignore the error returned from Query and handle it in Rows.
+//
+// It is possible for a call of FieldDescriptions on the returned Rows to return nil even if the Query call did not
+// return an error.
+//
+// It is possible for a query to return one or more rows before encountering an error. In most cases the rows should be
+// collected before processing rather than processed while receiving each row. This avoids the possibility of the
+// application processing rows from a query that the server rejected. The CollectRows function is useful here.
+//
+// An implementor of QueryRewriter may be passed as the first element of args. It can rewrite the sql and change or
+ // replace args. For example, NamedArgs is a QueryRewriter that implements named arguments.
+//
+// For extra control over how the query is executed, the types QueryExecMode, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
+func (c *Conn) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if c.queryTracer != nil {
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: args})
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ if c.queryTracer != nil {
+ c.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{Err: err})
+ }
+ return &baseRows{err: err, closed: true}, err
+ }
+
+ var resultFormats QueryResultFormats
+ var resultFormatsByOID QueryResultFormatsByOID
+ mode := c.config.DefaultQueryExecMode
+ var queryRewriter QueryRewriter
+
+optionLoop:
+ for len(args) > 0 {
+ switch arg := args[0].(type) {
+ case QueryResultFormats:
+ resultFormats = arg
+ args = args[1:]
+ case QueryResultFormatsByOID:
+ resultFormatsByOID = arg
+ args = args[1:]
+ case QueryExecMode:
+ mode = arg
+ args = args[1:]
+ case QueryRewriter:
+ queryRewriter = arg
+ args = args[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ var err error
+ originalSQL := sql
+ originalArgs := args
+ sql, args, err = queryRewriter.RewriteQuery(ctx, c, sql, args)
+ if err != nil {
+ rows := c.getRows(ctx, originalSQL, originalArgs)
+ err = fmt.Errorf("rewrite query failed: %w", err)
+ rows.fatal(err)
+ return rows, err
+ }
+ }
+
+ // Bypass any statement caching.
+ if sql == "" {
+ mode = QueryExecModeSimpleProtocol
+ }
+
+ c.eqb.reset()
+ rows := c.getRows(ctx, sql, args)
+
+ var err error
+ sd, explicitPreparedStatement := c.preparedStatements[sql]
+ if sd != nil || mode == QueryExecModeCacheStatement || mode == QueryExecModeCacheDescribe || mode == QueryExecModeDescribeExec {
+ if sd == nil {
+ sd, err = c.getStatementDescription(ctx, mode, sql)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+ }
+
+ if len(sd.ParamOIDs) != len(args) {
+ rows.fatal(fmt.Errorf("expected %d arguments, got %d", len(sd.ParamOIDs), len(args)))
+ return rows, rows.err
+ }
+
+ rows.sql = sd.SQL
+
+ err = c.eqb.Build(c.typeMap, sd, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ if resultFormatsByOID != nil {
+ resultFormats = make([]int16, len(sd.Fields))
+ for i := range resultFormats {
+ resultFormats[i] = resultFormatsByOID[uint32(sd.Fields[i].DataTypeOID)]
+ }
+ }
+
+ if resultFormats == nil {
+ resultFormats = c.eqb.ResultFormats
+ }
+
+ if !explicitPreparedStatement && mode == QueryExecModeCacheDescribe {
+ rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, resultFormats)
+ } else {
+ rows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, resultFormats)
+ }
+ } else if mode == QueryExecModeExec {
+ err := c.eqb.Build(c.typeMap, nil, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else if mode == QueryExecModeSimpleProtocol {
+ sql, err = c.sanitizeForSimpleQuery(sql, args...)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ mrr := c.pgConn.Exec(ctx, sql)
+ if mrr.NextResult() {
+ rows.resultReader = mrr.ResultReader()
+ rows.multiResultReader = mrr
+ } else {
+ err = mrr.Close()
+ rows.fatal(err)
+ return rows, err
+ }
+
+ return rows, nil
+ } else {
+ err = fmt.Errorf("unknown QueryExecMode: %v", mode)
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+
+ return rows, rows.err
+}
+
+// getStatementDescription returns the statement description of the sql query
+// according to the given mode.
+//
+// If the mode is one that doesn't require knowing the param and result OIDs
+// then nil is returned without error.
+func (c *Conn) getStatementDescription(
+ ctx context.Context,
+ mode QueryExecMode,
+ sql string,
+) (sd *pgconn.StatementDescription, err error) {
+ switch mode {
+ case QueryExecModeCacheStatement:
+ if c.statementCache == nil {
+ return nil, errDisabledStatementCache
+ }
+ sd = c.statementCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ if err != nil {
+ return nil, err
+ }
+ c.statementCache.Put(sd)
+ }
+ case QueryExecModeCacheDescribe:
+ if c.descriptionCache == nil {
+ return nil, errDisabledDescriptionCache
+ }
+ sd = c.descriptionCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, "", sql)
+ if err != nil {
+ return nil, err
+ }
+ c.descriptionCache.Put(sd)
+ }
+ case QueryExecModeDescribeExec:
+ return c.Prepare(ctx, "", sql)
+ }
+ return sd, err
+}
+
+// QueryRow is a convenience wrapper over Query. Any error that occurs while
+// querying is deferred until calling Scan on the returned Row. That Row will
+// error with ErrNoRows if no rows are returned.
+func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := c.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// SendBatch sends all queued queries to the server at once. All queries are run in an implicit transaction unless
+// explicit transaction control statements are executed. The returned BatchResults must be closed before the connection
+// is used again.
+//
+// Depending on the QueryExecMode, all queries may be prepared before any are executed. This means that creating a table
+// and using it in a subsequent query in the same batch can fail.
+func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
+ if c.batchTracer != nil {
+ ctx = c.batchTracer.TraceBatchStart(ctx, c, TraceBatchStartData{Batch: b})
+ defer func() {
+ err := br.(interface{ earlyError() error }).earlyError()
+ if err != nil {
+ c.batchTracer.TraceBatchEnd(ctx, c, TraceBatchEndData{Err: err})
+ }
+ }()
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+
+ for _, bi := range b.QueuedQueries {
+ var queryRewriter QueryRewriter
+ sql := bi.SQL
+ arguments := bi.Arguments
+
+ optionLoop:
+ for len(arguments) > 0 {
+ // Update Batch.Queue function comment when additional options are implemented
+ switch arg := arguments[0].(type) {
+ case QueryRewriter:
+ queryRewriter = arg
+ arguments = arguments[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ var err error
+ sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("rewrite query failed: %w", err)}
+ }
+ }
+
+ bi.SQL = sql
+ bi.Arguments = arguments
+ }
+
+ // TODO: changing mode per batch? Update Batch.Queue function comment when implemented
+ mode := c.config.DefaultQueryExecMode
+ if mode == QueryExecModeSimpleProtocol {
+ return c.sendBatchQueryExecModeSimpleProtocol(ctx, b)
+ }
+
+ // All other modes use extended protocol and thus can use prepared statements.
+ for _, bi := range b.QueuedQueries {
+ if sd, ok := c.preparedStatements[bi.SQL]; ok {
+ bi.sd = sd
+ }
+ }
+
+ switch mode {
+ case QueryExecModeExec:
+ return c.sendBatchQueryExecModeExec(ctx, b)
+ case QueryExecModeCacheStatement:
+ return c.sendBatchQueryExecModeCacheStatement(ctx, b)
+ case QueryExecModeCacheDescribe:
+ return c.sendBatchQueryExecModeCacheDescribe(ctx, b)
+ case QueryExecModeDescribeExec:
+ return c.sendBatchQueryExecModeDescribeExec(ctx, b)
+ default:
+ panic("unknown QueryExecMode")
+ }
+}
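+
+// Caller-side sketch of a batch round trip (illustrative; the statements and table are hypothetical).
+// Results must be read in the order the queries were queued, and BatchResults must be closed before
+// the connection is reused.
+//
+//	batch := &pgx.Batch{}
+//	batch.Queue("insert into widgets(name) values($1)", "sprocket")
+//	batch.Queue("select count(*) from widgets")
+//	br := conn.SendBatch(ctx, batch)
+//	if _, err := br.Exec(); err != nil { /* handle error */ }
+//	var n int64
+//	if err := br.QueryRow().Scan(&n); err != nil { /* handle error */ }
+//	if err := br.Close(); err != nil { /* handle error */ }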
+
+func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batch) *batchResults {
+ var sb strings.Builder
+ for i, bi := range b.QueuedQueries {
+ if i > 0 {
+ sb.WriteByte(';')
+ }
+ sql, err := c.sanitizeForSimpleQuery(bi.SQL, bi.Arguments...)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+ sb.WriteString(sql)
+ }
+ mrr := c.pgConn.Exec(ctx, sb.String())
+ return &batchResults{
+ ctx: ctx,
+ conn: c,
+ mrr: mrr,
+ b: b,
+ qqIdx: 0,
+ }
+}
+
+func (c *Conn) sendBatchQueryExecModeExec(ctx context.Context, b *Batch) *batchResults {
+ batch := &pgconn.Batch{}
+
+ for _, bi := range b.QueuedQueries {
+ sd := bi.sd
+ if sd != nil {
+ err := c.eqb.Build(c.typeMap, sd, bi.Arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+
+ batch.ExecPrepared(sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else {
+ err := c.eqb.Build(c.typeMap, nil, bi.Arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+ batch.ExecParams(bi.SQL, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ }
+ }
+
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+
+ mrr := c.pgConn.ExecBatch(ctx, batch)
+
+ return &batchResults{
+ ctx: ctx,
+ conn: c,
+ mrr: mrr,
+ b: b,
+ qqIdx: 0,
+ }
+}
+
+func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ if c.statementCache == nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledStatementCache, closed: true}
+ }
+
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ sd := c.statementCache.Get(bi.SQL)
+ if sd != nil {
+ bi.sd = sd
+ } else {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd = &pgconn.StatementDescription{
+ Name: stmtcache.StatementName(bi.SQL),
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, c.statementCache)
+}
+
+func (c *Conn) sendBatchQueryExecModeCacheDescribe(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ if c.descriptionCache == nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledDescriptionCache, closed: true}
+ }
+
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ sd := c.descriptionCache.Get(bi.SQL)
+ if sd != nil {
+ bi.sd = sd
+ } else {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd = &pgconn.StatementDescription{
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, c.descriptionCache)
+}
+
+func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd := &pgconn.StatementDescription{
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, nil)
+}
+
+func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, distinctNewQueries []*pgconn.StatementDescription, sdCache stmtcache.Cache) (pbr *pipelineBatchResults) {
+ pipeline := c.pgConn.StartPipeline(ctx)
+ defer func() {
+ if pbr != nil && pbr.err != nil {
+ pipeline.Close()
+ }
+ }()
+
+ // Prepare any needed queries
+ if len(distinctNewQueries) > 0 {
+ err := func() (err error) {
+ for _, sd := range distinctNewQueries {
+ pipeline.SendPrepare(sd.Name, sd.SQL, nil)
+ }
+
+ // Store all statements we are preparing into the cache. It's fine if it overflows because HandleInvalidated will
+ // clean them up later.
+ if sdCache != nil {
+ for _, sd := range distinctNewQueries {
+ sdCache.Put(sd)
+ }
+ }
+
+ // If something goes wrong preparing the statements, we need to invalidate the cache entries we just added.
+ defer func() {
+ if err != nil && sdCache != nil {
+ for _, sd := range distinctNewQueries {
+ sdCache.Invalidate(sd.SQL)
+ }
+ }
+ }()
+
+ err = pipeline.Sync()
+ if err != nil {
+ return err
+ }
+
+ for _, sd := range distinctNewQueries {
+ results, err := pipeline.GetResults()
+ if err != nil {
+ return err
+ }
+
+ resultSD, ok := results.(*pgconn.StatementDescription)
+ if !ok {
+ return fmt.Errorf("expected statement description, got %T", results)
+ }
+
+ // Fill in the previously empty / pending statement descriptions.
+ sd.ParamOIDs = resultSD.ParamOIDs
+ sd.Fields = resultSD.Fields
+ }
+
+ results, err := pipeline.GetResults()
+ if err != nil {
+ return err
+ }
+
+ _, ok := results.(*pgconn.PipelineSync)
+ if !ok {
+ return fmt.Errorf("expected sync, got %T", results)
+ }
+
+ return nil
+ }()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+ }
+
+ // Queue the queries.
+ for _, bi := range b.QueuedQueries {
+ err := c.eqb.Build(c.typeMap, bi.sd, bi.Arguments)
+ if err != nil {
+ // wrap the error so the user can tell which query inside the batch failed
+ err = fmt.Errorf("error building query %s: %w", bi.SQL, err)
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ if bi.sd.Name == "" {
+ pipeline.SendQueryParams(bi.sd.SQL, c.eqb.ParamValues, bi.sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else {
+ pipeline.SendQueryPrepared(bi.sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ }
+ }
+
+ err := pipeline.Sync()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ return &pipelineBatchResults{
+ ctx: ctx,
+ conn: c,
+ pipeline: pipeline,
+ b: b,
+ }
+}
+
+func (c *Conn) sanitizeForSimpleQuery(sql string, args ...any) (string, error) {
+ if c.pgConn.ParameterStatus("standard_conforming_strings") != "on" {
+ return "", errors.New("simple protocol queries must be run with standard_conforming_strings=on")
+ }
+
+ if c.pgConn.ParameterStatus("client_encoding") != "UTF8" {
+ return "", errors.New("simple protocol queries must be run with client_encoding=UTF8")
+ }
+
+ var err error
+ valueArgs := make([]any, len(args))
+ for i, a := range args {
+ valueArgs[i], err = convertSimpleArgument(c.typeMap, a)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return sanitize.SanitizeSQL(sql, valueArgs...)
+}
+
+// LoadType inspects the database for typeName and produces a pgtype.Type suitable for registration. typeName must be
+// the name of a type where the underlying type(s) is already understood by pgx. It is for derived types. In particular,
+// typeName must be one of the following:
+// - An array type name of a type that is already registered. e.g. "_foo" when "foo" is registered.
+// - A composite type name where all field types are already registered.
+// - A domain type name where the base type is already registered.
+// - An enum type name.
+// - A range type name where the element type is already registered.
+// - A multirange type name where the element type is already registered.
+func (c *Conn) LoadType(ctx context.Context, typeName string) (*pgtype.Type, error) {
+ var oid uint32
+
+ err := c.QueryRow(ctx, "select $1::text::regtype::oid;", typeName).Scan(&oid)
+ if err != nil {
+ return nil, err
+ }
+
+ var typtype string
+ var typbasetype uint32
+
+ err = c.QueryRow(ctx, "select typtype::text, typbasetype from pg_type where oid=$1", oid).Scan(&typtype, &typbasetype)
+ if err != nil {
+ return nil, err
+ }
+
+ switch typtype {
+ case "b": // array
+ elementOID, err := c.getArrayElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("array element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}, nil
+ case "c": // composite
+ fields, err := c.getCompositeFields(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.CompositeCodec{Fields: fields}}, nil
+ case "d": // domain
+ dt, ok := c.TypeMap().TypeForOID(typbasetype)
+ if !ok {
+ return nil, errors.New("domain base type OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: dt.Codec}, nil
+ case "e": // enum
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.EnumCodec{}}, nil
+ case "r": // range
+ elementOID, err := c.getRangeElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("range element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.RangeCodec{ElementType: dt}}, nil
+ case "m": // multirange
+ elementOID, err := c.getMultiRangeElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("multirange element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}, nil
+ default:
+ return &pgtype.Type{}, errors.New("unknown typtype")
+ }
+}
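+
+// Caller-side sketch of registering a derived type (the type name "mood" is hypothetical; the
+// array form usually needs to be registered as well so slices of the type can be encoded).
+//
+//	t, err := conn.LoadType(ctx, "mood")
+//	if err != nil { /* handle error */ }
+//	conn.TypeMap().RegisterType(t)
+//	at, err := conn.LoadType(ctx, "_mood")
+//	if err != nil { /* handle error */ }
+//	conn.TypeMap().RegisterType(at)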
+
+func (c *Conn) getArrayElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select typelem from pg_type where oid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getRangeElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select rngsubtype from pg_range where rngtypid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getMultiRangeElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select rngtypid from pg_range where rngmultitypid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getCompositeFields(ctx context.Context, oid uint32) ([]pgtype.CompositeCodecField, error) {
+ var typrelid uint32
+
+ err := c.QueryRow(ctx, "select typrelid from pg_type where oid=$1", oid).Scan(&typrelid)
+ if err != nil {
+ return nil, err
+ }
+
+ var fields []pgtype.CompositeCodecField
+ var fieldName string
+ var fieldOID uint32
+ rows, _ := c.Query(ctx, `select attname, atttypid
+from pg_attribute
+where attrelid=$1
+ and not attisdropped
+ and attnum > 0
+order by attnum`,
+ typrelid,
+ )
+ _, err = ForEachRow(rows, []any{&fieldName, &fieldOID}, func() error {
+ dt, ok := c.TypeMap().TypeForOID(fieldOID)
+ if !ok {
+ return fmt.Errorf("unknown composite type field OID: %v", fieldOID)
+ }
+ fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return fields, nil
+}
+
+func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error {
+ if txStatus := c.pgConn.TxStatus(); txStatus != 'I' && txStatus != 'T' {
+ return nil
+ }
+
+ if c.descriptionCache != nil {
+ c.descriptionCache.RemoveInvalidated()
+ }
+
+ var invalidatedStatements []*pgconn.StatementDescription
+ if c.statementCache != nil {
+ invalidatedStatements = c.statementCache.GetInvalidated()
+ }
+
+ if len(invalidatedStatements) == 0 {
+ return nil
+ }
+
+ pipeline := c.pgConn.StartPipeline(ctx)
+ defer pipeline.Close()
+
+ for _, sd := range invalidatedStatements {
+ pipeline.SendDeallocate(sd.Name)
+ }
+
+ err := pipeline.Sync()
+ if err != nil {
+ return fmt.Errorf("failed to deallocate cached statement(s): %w", err)
+ }
+
+ err = pipeline.Close()
+ if err != nil {
+ return fmt.Errorf("failed to deallocate cached statement(s): %w", err)
+ }
+
+ c.statementCache.RemoveInvalidated()
+ for _, sd := range invalidatedStatements {
+ delete(c.preparedStatements, sd.Name)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/copy_from.go b/vendor/github.com/jackc/pgx/v5/copy_from.go
new file mode 100644
index 0000000..abcd223
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/copy_from.go
@@ -0,0 +1,276 @@
+package pgx
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// CopyFromRows returns a CopyFromSource interface over the provided rows slice
+// making it usable by *Conn.CopyFrom.
+func CopyFromRows(rows [][]any) CopyFromSource {
+ return &copyFromRows{rows: rows, idx: -1}
+}
+
+type copyFromRows struct {
+ rows [][]any
+ idx int
+}
+
+func (ctr *copyFromRows) Next() bool {
+ ctr.idx++
+ return ctr.idx < len(ctr.rows)
+}
+
+func (ctr *copyFromRows) Values() ([]any, error) {
+ return ctr.rows[ctr.idx], nil
+}
+
+func (ctr *copyFromRows) Err() error {
+ return nil
+}
+
+// CopyFromSlice returns a CopyFromSource interface over a dynamic func
+// making it usable by *Conn.CopyFrom.
+func CopyFromSlice(length int, next func(int) ([]any, error)) CopyFromSource {
+ return &copyFromSlice{next: next, idx: -1, len: length}
+}
+
+type copyFromSlice struct {
+ next func(int) ([]any, error)
+ idx int
+ len int
+ err error
+}
+
+func (cts *copyFromSlice) Next() bool {
+ cts.idx++
+ return cts.idx < cts.len
+}
+
+func (cts *copyFromSlice) Values() ([]any, error) {
+ values, err := cts.next(cts.idx)
+ if err != nil {
+ cts.err = err
+ }
+ return values, err
+}
+
+func (cts *copyFromSlice) Err() error {
+ return cts.err
+}
+
+// CopyFromFunc returns a CopyFromSource interface that relies on nxtf for values.
+// nxtf returns rows until it either signals an 'end of data' by returning row=nil and err=nil,
+// or it returns an error. If nxtf returns an error, the copy is aborted.
+func CopyFromFunc(nxtf func() (row []any, err error)) CopyFromSource {
+ return &copyFromFunc{next: nxtf}
+}
+
+type copyFromFunc struct {
+ next func() ([]any, error)
+ valueRow []any
+ err error
+}
+
+func (g *copyFromFunc) Next() bool {
+ g.valueRow, g.err = g.next()
+ // only return true if valueRow exists and no error
+ return g.valueRow != nil && g.err == nil
+}
+
+func (g *copyFromFunc) Values() ([]any, error) {
+ return g.valueRow, g.err
+}
+
+func (g *copyFromFunc) Err() error {
+ return g.err
+}
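+
+// Caller-side sketch of CopyFromFunc streaming rows from another source. readNextRow is a
+// hypothetical helper; returning (nil, nil) signals end of data.
+//
+//	copyCount, err := conn.CopyFrom(
+//		ctx,
+//		pgx.Identifier{"people"},
+//		[]string{"first_name", "last_name", "age"},
+//		pgx.CopyFromFunc(func() ([]any, error) {
+//			row, err := readNextRow()
+//			if errors.Is(err, io.EOF) {
+//				return nil, nil // end of data
+//			}
+//			return row, err
+//		}),
+//	)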
+
+// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
+type CopyFromSource interface {
+ // Next returns true if there is another row and makes the next row data
+ // available to Values(). When there are no more rows available or an error
+ // has occurred it returns false.
+ Next() bool
+
+ // Values returns the values for the current row.
+ Values() ([]any, error)
+
+ // Err returns any error that has been encountered by the CopyFromSource. If
+ // this is not nil *Conn.CopyFrom will abort the copy.
+ Err() error
+}
+
+type copyFrom struct {
+ conn *Conn
+ tableName Identifier
+ columnNames []string
+ rowSrc CopyFromSource
+ readerErrChan chan error
+ mode QueryExecMode
+}
+
+func (ct *copyFrom) run(ctx context.Context) (int64, error) {
+ if ct.conn.copyFromTracer != nil {
+ ctx = ct.conn.copyFromTracer.TraceCopyFromStart(ctx, ct.conn, TraceCopyFromStartData{
+ TableName: ct.tableName,
+ ColumnNames: ct.columnNames,
+ })
+ }
+
+ quotedTableName := ct.tableName.Sanitize()
+ cbuf := &bytes.Buffer{}
+ for i, cn := range ct.columnNames {
+ if i != 0 {
+ cbuf.WriteString(", ")
+ }
+ cbuf.WriteString(quoteIdentifier(cn))
+ }
+ quotedColumnNames := cbuf.String()
+
+ var sd *pgconn.StatementDescription
+ switch ct.mode {
+ case QueryExecModeExec, QueryExecModeSimpleProtocol:
+ // These modes don't support the binary format. Before the inclusion of the
+ // QueryExecModes, Conn.Prepare was called on every COPY operation to get
+ // the OIDs. These prepared statements were not cached.
+ //
+ // Since that's the same behavior provided by QueryExecModeDescribeExec,
+ // we'll default to that mode.
+ ct.mode = QueryExecModeDescribeExec
+ fallthrough
+ case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
+ var err error
+ sd, err = ct.conn.getStatementDescription(
+ ctx,
+ ct.mode,
+ fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName),
+ )
+ if err != nil {
+ return 0, fmt.Errorf("statement description failed: %w", err)
+ }
+ default:
+ return 0, fmt.Errorf("unknown QueryExecMode: %v", ct.mode)
+ }
+
+ r, w := io.Pipe()
+ doneChan := make(chan struct{})
+
+ go func() {
+ defer close(doneChan)
+
+ // Purposely NOT using defer w.Close(). See https://github.com/golang/go/issues/24283.
+ buf := ct.conn.wbuf
+
+ buf = append(buf, "PGCOPY\n\377\r\n\000"...)
+ buf = pgio.AppendInt32(buf, 0)
+ buf = pgio.AppendInt32(buf, 0)
+
+ moreRows := true
+ for moreRows {
+ var err error
+ moreRows, buf, err = ct.buildCopyBuf(buf, sd)
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if ct.rowSrc.Err() != nil {
+ w.CloseWithError(ct.rowSrc.Err())
+ return
+ }
+
+ if len(buf) > 0 {
+ _, err = w.Write(buf)
+ if err != nil {
+ w.Close()
+ return
+ }
+ }
+
+ buf = buf[:0]
+ }
+
+ w.Close()
+ }()
+
+ commandTag, err := ct.conn.pgConn.CopyFrom(ctx, r, fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames))
+
+ r.Close()
+ <-doneChan
+
+ if ct.conn.copyFromTracer != nil {
+ ct.conn.copyFromTracer.TraceCopyFromEnd(ctx, ct.conn, TraceCopyFromEndData{
+ CommandTag: commandTag,
+ Err: err,
+ })
+ }
+
+ return commandTag.RowsAffected(), err
+}
+
+func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (bool, []byte, error) {
+ const sendBufSize = 65536 - 5 // The packet has a 5-byte header
+ lastBufLen := 0
+ largestRowLen := 0
+
+ for ct.rowSrc.Next() {
+ lastBufLen = len(buf)
+
+ values, err := ct.rowSrc.Values()
+ if err != nil {
+ return false, nil, err
+ }
+ if len(values) != len(ct.columnNames) {
+ return false, nil, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
+ for i, val := range values {
+ buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, val)
+ if err != nil {
+ return false, nil, err
+ }
+ }
+
+ rowLen := len(buf) - lastBufLen
+ if rowLen > largestRowLen {
+ largestRowLen = rowLen
+ }
+
+ // Try not to overflow size of the buffer PgConn.CopyFrom will be reading into. If that happens then the nature of
+ // io.Pipe means that the next Read will be short. This can lead to pathological send sizes such as 65531, 13, 65531,
+ // 13, 65531, 13, 65531, 13.
+ if len(buf) > sendBufSize-largestRowLen {
+ return true, buf, nil
+ }
+ }
+
+ return false, buf, nil
+}
+
+// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. It returns the number of rows copied and
+// an error.
+//
+// CopyFrom requires all values use the binary format. A pgtype.Type that supports the binary format must be registered
+// for the type of each column. Almost all types implemented by pgx support the binary format.
+//
+// Even though enum types appear to be strings they still must be registered to use with CopyFrom. This can be done with
+// Conn.LoadType and pgtype.Map.RegisterType.
+func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ ct := &copyFrom{
+ conn: c,
+ tableName: tableName,
+ columnNames: columnNames,
+ rowSrc: rowSrc,
+ readerErrChan: make(chan error),
+ mode: c.config.DefaultQueryExecMode,
+ }
+
+ return ct.run(ctx)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/derived_types.go b/vendor/github.com/jackc/pgx/v5/derived_types.go
new file mode 100644
index 0000000..72c0a24
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/derived_types.go
@@ -0,0 +1,256 @@
+package pgx
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+/*
+buildLoadDerivedTypesSQL generates the correct query for retrieving type information.
+
+ pgVersion: the major version of the PostgreSQL server
+ typeNames: the names of the types to load. If nil, load all types.
+*/
+func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
+ supportsMultirange := (pgVersion >= 14)
+ var typeNamesClause string
+
+ if typeNames == nil {
+ // This should not occur; this will not return any types
+ typeNamesClause = "= ''"
+ } else {
+ typeNamesClause = "= ANY($1)"
+ }
+ parts := make([]string, 0, 10)
+
+ // Each of the type names provided might be found in pg_class or pg_type.
+ // Additionally, it may or may not include a schema portion.
+ parts = append(parts, `
+WITH RECURSIVE
+-- find the OIDs in pg_class which match one of the provided type names
+selected_classes(oid,reltype) AS (
+ -- this query uses the namespace search path, so will match type names without a schema prefix
+ SELECT pg_class.oid, pg_class.reltype
+ FROM pg_catalog.pg_class
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
+ WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
+ AND relname `, typeNamesClause, `
+UNION ALL
+ -- this query will only match type names which include the schema prefix
+ SELECT pg_class.oid, pg_class.reltype
+ FROM pg_class
+ INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
+ WHERE nspname || '.' || relname `, typeNamesClause, `
+),
+selected_types(oid) AS (
+ -- collect the OIDs from pg_types which correspond to the selected classes
+ SELECT reltype AS oid
+ FROM selected_classes
+UNION ALL
+ -- as well as any other type names which match our criteria
+ SELECT pg_type.oid
+ FROM pg_type
+ LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
+ WHERE typname `, typeNamesClause, `
+ OR nspname || '.' || typname `, typeNamesClause, `
+),
+-- this builds a parent/child mapping of objects, allowing us to know
+-- all the child (ie: dependent) types that a parent (type) requires
+-- As can be seen, there are 3 ways this can occur (the last of which
+-- is due to being a composite class, where the composite fields are children)
+pc(parent, child) AS (
+ SELECT parent.oid, parent.typelem
+ FROM pg_type parent
+ WHERE parent.typtype = 'b' AND parent.typelem != 0
+UNION ALL
+ SELECT parent.oid, parent.typbasetype
+ FROM pg_type parent
+ WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
+UNION ALL
+ SELECT pg_type.oid, atttypid
+ FROM pg_attribute
+ INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
+ INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
+ WHERE NOT attisdropped
+ AND attnum > 0
+),
+-- Now construct a recursive query which includes a 'depth' element.
+-- This is used to ensure that the "youngest" children are registered before
+-- their parents.
+relationships(parent, child, depth) AS (
+ SELECT DISTINCT 0::OID, selected_types.oid, 0
+ FROM selected_types
+UNION ALL
+ SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
+ FROM selected_classes c
+ inner join pg_type ON (c.reltype = pg_type.oid)
+ inner join pg_attribute on (c.oid = pg_attribute.attrelid)
+UNION ALL
+ SELECT pc.parent, pc.child, relationships.depth + 1
+ FROM pc
+ INNER JOIN relationships ON (pc.parent = relationships.child)
+),
+-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
+composite AS (
+ SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
+ FROM pg_attribute
+ INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
+ INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
+ WHERE NOT attisdropped
+ AND attnum > 0
+ GROUP BY pg_type.oid
+)
+-- Bring together this information, showing all the information which might possibly be required
+-- to complete the registration, applying filters to only show the items which relate to the selected
+-- types/classes.
+SELECT typname,
+ pg_namespace.nspname,
+ typtype,
+ typbasetype,
+ typelem,
+ pg_type.oid,`)
+ if supportsMultirange {
+ parts = append(parts, `
+ COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
+ } else {
+ parts = append(parts, `
+ 0 AS rngtypid,`)
+ }
+ parts = append(parts, `
+ COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
+ attnames, atttypids
+ FROM relationships
+ INNER JOIN pg_type ON (pg_type.oid = relationships.child)
+ LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
+ if supportsMultirange {
+ parts = append(parts, `
+ LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
+ }
+
+ parts = append(parts, `
+ LEFT OUTER JOIN composite USING (oid)
+ LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
+ WHERE NOT (typtype = 'b' AND typelem = 0)`)
+ parts = append(parts, `
+ GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
+ if supportsMultirange {
+ parts = append(parts, `
+ multirange.rngtypid,`)
+ }
+ parts = append(parts, `
+ attnames, atttypids
+ ORDER BY MAX(depth) desc, typname;`)
+ return strings.Join(parts, "")
+}
+
+type derivedTypeInfo struct {
+ Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
+ TypeName, Typtype, NspName string
+ Attnames []string
+ Atttypids []uint32
+}
+
+// LoadTypes performs a single (complex) query, returning all the required
+// information to register the named types, as well as any other types directly
+// or indirectly required to complete the registration.
+// The result of this call can be passed into RegisterTypes to complete the process.
+func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
+ m := c.TypeMap()
+ if len(typeNames) == 0 {
+ return nil, fmt.Errorf("No type names were supplied.")
+ }
+
+ // Disregard server version errors. This will result in
+ // the SQL not supporting recent structures such as multirange
+ serverVersion, _ := serverVersion(c)
+ sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
+ rows, err := c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
+ if err != nil {
+ return nil, fmt.Errorf("While generating load types query: %w", err)
+ }
+ defer rows.Close()
+ result := make([]*pgtype.Type, 0, 100)
+ for rows.Next() {
+ ti := derivedTypeInfo{}
+ err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
+ if err != nil {
+ return nil, fmt.Errorf("While scanning type information: %w", err)
+ }
+ var type_ *pgtype.Type
+ switch ti.Typtype {
+ case "b": // array
+ dt, ok := m.TypeForOID(ti.Typelem)
+ if !ok {
+ return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
+ }
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
+ case "c": // composite
+ var fields []pgtype.CompositeCodecField
+ for i, fieldName := range ti.Attnames {
+ dt, ok := m.TypeForOID(ti.Atttypids[i])
+ if !ok {
+ return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
+ }
+ fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
+ case "d": // domain
+ dt, ok := m.TypeForOID(ti.Typbasetype)
+ if !ok {
+ return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
+ case "e": // enum
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
+ case "r": // range
+ dt, ok := m.TypeForOID(ti.Rngsubtype)
+ if !ok {
+ return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
+ case "m": // multirange
+ dt, ok := m.TypeForOID(ti.Rngtypid)
+ if !ok {
+ return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
+ default:
+ return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
+ }
+
+ // type_ cannot be nil at this point
+ m.RegisterType(type_)
+ if ti.NspName != "" {
+ nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
+ m.RegisterType(nspType)
+ result = append(result, nspType)
+ }
+ result = append(result, type_)
+ }
+ return result, nil
+}
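+
+// Caller-side sketch (type names hypothetical). LoadTypes returns dependencies before their
+// parents, so registering the results in order is sufficient.
+//
+//	types, err := conn.LoadTypes(ctx, []string{"mood", "_mood"})
+//	if err != nil { /* handle error */ }
+//	for _, t := range types {
+//		conn.TypeMap().RegisterType(t)
+//	}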
+
+// serverVersion returns the postgresql server version.
+func serverVersion(c *Conn) (int64, error) {
+ serverVersionStr := c.PgConn().ParameterStatus("server_version")
+ serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ // if the version cannot be determined (e.g. not a PostgreSQL server), return an error
+ if serverVersionStr == "" {
+ return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
+ }
+
+ version, err := strconv.ParseInt(serverVersionStr, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("postgres version parsing failed: %w", err)
+ }
+ return version, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/doc.go b/vendor/github.com/jackc/pgx/v5/doc.go
new file mode 100644
index 0000000..5d2ae38
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/doc.go
@@ -0,0 +1,194 @@
+// Package pgx is a PostgreSQL database driver.
+/*
+pgx provides a native PostgreSQL driver and can act as a database/sql driver. The native PostgreSQL interface is similar
+to the database/sql interface while providing better speed and access to PostgreSQL specific features. Use
+github.com/jackc/pgx/v5/stdlib to use pgx as a database/sql compatible driver. See that package's documentation for
+details.
+
+Establishing a Connection
+
+The primary way of establishing a connection is with [pgx.Connect]:
+
+ conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+
+The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be
+specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the
+connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection
+string.
+
+Connection Pool
+
+[*pgx.Conn] represents a single connection to the database and is not concurrency safe. Use package
+github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool.
+
+Query Interface
+
+pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and
+ForEachRow that are a simpler and safer way of processing rows than manually calling defer rows.Close(), rows.Next(),
+rows.Scan, and rows.Err().
+
+CollectRows can be used to collect all returned rows into a slice.
+
+ rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 5)
+ numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])
+ if err != nil {
+ return err
+ }
+ // numbers => [1 2 3 4 5]
+
+ForEachRow can be used to execute a callback function for every row. This is often easier than iterating over rows
+directly.
+
+ var sum, n int32
+ rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
+ _, err := pgx.ForEachRow(rows, []any{&n}, func() error {
+ sum += n
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+pgx also implements QueryRow in the same style as database/sql.
+
+ var name string
+ var weight int64
+ err := conn.QueryRow(context.Background(), "select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+ if err != nil {
+ return err
+ }
+
+Use Exec to execute a query that does not return a result set.
+
+ commandTag, err := conn.Exec(context.Background(), "delete from widgets where id=$1", 42)
+ if err != nil {
+ return err
+ }
+ if commandTag.RowsAffected() != 1 {
+ return errors.New("No row found to delete")
+ }
+
+PostgreSQL Data Types
+
+pgx uses the pgtype package to convert Go values to and from PostgreSQL values. It supports many PostgreSQL types
+directly and is customizable and extendable. User defined data types such as enums, domains, and composite types may
+require type registration. See that package's documentation for details.
+
+Transactions
+
+Transactions are started by calling Begin.
+
+ tx, err := conn.Begin(context.Background())
+ if err != nil {
+ return err
+ }
+ // Rollback is safe to call even if the tx is already closed, so if
+ // the tx commits successfully, this is a no-op
+ defer tx.Rollback(context.Background())
+
+ _, err = tx.Exec(context.Background(), "insert into foo(id) values (1)")
+ if err != nil {
+ return err
+ }
+
+ err = tx.Commit(context.Background())
+ if err != nil {
+ return err
+ }
+
+The Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.
+These are internally implemented with savepoints.
+
+Use BeginTx to control the transaction mode. BeginTx also can be used to ensure a new transaction is created instead of
+a pseudo nested transaction.
+
+BeginFunc and BeginTxFunc are functions that begin a transaction, execute a function, and commit or rollback the
+transaction depending on the return value of the function. These can be simpler and less error prone to use.
+
+ err = pgx.BeginFunc(context.Background(), conn, func(tx pgx.Tx) error {
+ _, err := tx.Exec(context.Background(), "insert into foo(id) values (1)")
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+Prepared Statements
+
+Prepared statements can be manually created with the Prepare method. However, this is rarely necessary because pgx
+includes an automatic statement cache by default. Queries run through the normal Query, QueryRow, and Exec functions are
+automatically prepared on first execution and the prepared statement is reused on subsequent executions. See ParseConfig
+for information on how to customize or disable the statement cache.
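+
+A manually prepared statement can then be executed by its name (a sketch; the statement name and
+query are arbitrary):
+
+ var name string
+ var weight int64
+ _, err := conn.Prepare(context.Background(), "getWidget", "select name, weight from widgets where id=$1")
+ if err != nil {
+ return err
+ }
+ err = conn.QueryRow(context.Background(), "getWidget", 42).Scan(&name, &weight)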
+
+Copy Protocol
+
+Use CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a
+CopyFromSource interface. If the data is already in a [][]any use CopyFromRows to wrap it in a CopyFromSource interface.
+Or implement CopyFromSource to avoid buffering the entire data set in memory.
+
+ rows := [][]any{
+ {"John", "Smith", int32(36)},
+ {"Jane", "Doe", int32(29)},
+ }
+
+ copyCount, err := conn.CopyFrom(
+ context.Background(),
+ pgx.Identifier{"people"},
+ []string{"first_name", "last_name", "age"},
+ pgx.CopyFromRows(rows),
+ )
+
+When you already have a typed array, using CopyFromSlice can be more convenient.
+
+ rows := []User{
+ {"John", "Smith", 36},
+ {"Jane", "Doe", 29},
+ }
+
+ copyCount, err := conn.CopyFrom(
+ context.Background(),
+ pgx.Identifier{"people"},
+ []string{"first_name", "last_name", "age"},
+ pgx.CopyFromSlice(len(rows), func(i int) ([]any, error) {
+ return []any{rows[i].FirstName, rows[i].LastName, rows[i].Age}, nil
+ }),
+ )
+
+CopyFrom can be faster than an insert with as few as 5 rows.
+
+Listen and Notify
+
+pgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a
+notification is received or the context is canceled.
+
+ _, err := conn.Exec(context.Background(), "listen channelname")
+ if err != nil {
+ return err
+ }
+
+ notification, err := conn.WaitForNotification(context.Background())
+ if err != nil {
+ return err
+ }
+ // do something with notification
+
+
+Tracing and Logging
+
+pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
+
+In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
+
+For debug tracing of the actual PostgreSQL wire protocol messages see github.com/jackc/pgx/v5/pgproto3.
+
+Lower Level PostgreSQL Functionality
+
+github.com/jackc/pgx/v5/pgconn contains a lower level PostgreSQL driver roughly at the level of libpq. pgx.Conn is
+implemented on top of pgconn. The Conn.PgConn() method can be used to access this lower layer.
+
+PgBouncer
+
+By default pgx automatically uses prepared statements. Prepared statements are incompatible with PgBouncer. This can be
+disabled by setting a different QueryExecMode in ConnConfig.DefaultQueryExecMode.
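+
+For example, a sketch of disabling automatic prepared statements via the config struct:
+
+ config, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
+ if err != nil {
+ return err
+ }
+ config.DefaultQueryExecMode = pgx.QueryExecModeExec
+ conn, err := pgx.ConnectConfig(context.Background(), config)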
+*/
+package pgx
diff --git a/vendor/github.com/jackc/pgx/v5/extended_query_builder.go b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go
new file mode 100644
index 0000000..526b0e9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go
@@ -0,0 +1,146 @@
+package pgx
+
+import (
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// ExtendedQueryBuilder is used to choose the parameter formats, to format the parameters and to choose the result
+// formats for an extended query.
+type ExtendedQueryBuilder struct {
+ ParamValues [][]byte
+ paramValueBytes []byte
+ ParamFormats []int16
+ ResultFormats []int16
+}
+
+// Build sets ParamValues, ParamFormats, and ResultFormats for use with *PgConn.ExecParams or *PgConn.ExecPrepared. If
+// sd is nil then QueryExecModeExec behavior will be used.
+func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error {
+ eqb.reset()
+
+ if sd == nil {
+ for i := range args {
+ err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i])
+ if err != nil {
+ err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
+ return err
+ }
+ }
+ return nil
+ }
+
+ if len(sd.ParamOIDs) != len(args) {
+ return fmt.Errorf("mismatched param and argument count")
+ }
+
+ for i := range args {
+ err := eqb.appendParam(m, sd.ParamOIDs[i], -1, args[i])
+ if err != nil {
+ err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
+ return err
+ }
+ }
+
+ for i := range sd.Fields {
+ eqb.appendResultFormat(m.FormatCodeForOID(sd.Fields[i].DataTypeOID))
+ }
+
+ return nil
+}
+
+// appendParam appends a parameter to the query. format may be -1 to automatically choose the format. If arg is nil it
+// must be an untyped nil.
+func (eqb *ExtendedQueryBuilder) appendParam(m *pgtype.Map, oid uint32, format int16, arg any) error {
+ if format == -1 {
+ preferredFormat := eqb.chooseParameterFormatCode(m, oid, arg)
+ preferredErr := eqb.appendParam(m, oid, preferredFormat, arg)
+ if preferredErr == nil {
+ return nil
+ }
+
+ var otherFormat int16
+ if preferredFormat == TextFormatCode {
+ otherFormat = BinaryFormatCode
+ } else {
+ otherFormat = TextFormatCode
+ }
+
+ otherErr := eqb.appendParam(m, oid, otherFormat, arg)
+ if otherErr == nil {
+ return nil
+ }
+
+ return preferredErr // return the error from the preferred format
+ }
+
+ v, err := eqb.encodeExtendedParamValue(m, oid, format, arg)
+ if err != nil {
+ return err
+ }
+
+ eqb.ParamFormats = append(eqb.ParamFormats, format)
+ eqb.ParamValues = append(eqb.ParamValues, v)
+
+ return nil
+}
+
+// appendResultFormat appends a result format to the query.
+func (eqb *ExtendedQueryBuilder) appendResultFormat(format int16) {
+ eqb.ResultFormats = append(eqb.ResultFormats, format)
+}
+
+// reset readies eqb to build another query.
+func (eqb *ExtendedQueryBuilder) reset() {
+ eqb.ParamValues = eqb.ParamValues[0:0]
+ eqb.paramValueBytes = eqb.paramValueBytes[0:0]
+ eqb.ParamFormats = eqb.ParamFormats[0:0]
+ eqb.ResultFormats = eqb.ResultFormats[0:0]
+
+ if cap(eqb.ParamValues) > 64 {
+ eqb.ParamValues = make([][]byte, 0, 64)
+ }
+
+ if cap(eqb.paramValueBytes) > 256 {
+ eqb.paramValueBytes = make([]byte, 0, 256)
+ }
+
+ if cap(eqb.ParamFormats) > 64 {
+ eqb.ParamFormats = make([]int16, 0, 64)
+ }
+ if cap(eqb.ResultFormats) > 64 {
+ eqb.ResultFormats = make([]int16, 0, 64)
+ }
+}
+
+func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) {
+ if eqb.paramValueBytes == nil {
+ eqb.paramValueBytes = make([]byte, 0, 128)
+ }
+
+ pos := len(eqb.paramValueBytes)
+
+ buf, err := m.Encode(oid, formatCode, arg, eqb.paramValueBytes)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+ eqb.paramValueBytes = buf
+ return eqb.paramValueBytes[pos:], nil
+}
+
+// chooseParameterFormatCode determines the correct format code for an
+// argument to a prepared statement. It defaults to TextFormatCode if no
+// determination can be made.
+func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid uint32, arg any) int16 {
+ switch arg.(type) {
+ case string, *string:
+ return TextFormatCode
+ }
+
+ return m.FormatCodeForOID(oid)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go b/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
new file mode 100644
index 0000000..89e0c22
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
@@ -0,0 +1,70 @@
+// Package iobufpool implements a global segregated-fit pool of buffers for IO.
+//
+// It uses *[]byte instead of []byte to avoid the sync.Pool allocation with Put. Unfortunately, using a pointer to avoid
+// an allocation is purposely not documented. https://github.com/golang/go/issues/16323
+package iobufpool
+
+import "sync"
+
+const minPoolExpOf2 = 8
+
+var pools [18]*sync.Pool
+
+func init() {
+ for i := range pools {
+ bufLen := 1 << (minPoolExpOf2 + i)
+ pools[i] = &sync.Pool{
+ New: func() any {
+ buf := make([]byte, bufLen)
+ return &buf
+ },
+ }
+ }
+}
+
+// Get gets a []byte of len size with cap <= size*2.
+func Get(size int) *[]byte {
+ i := getPoolIdx(size)
+ if i >= len(pools) {
+ buf := make([]byte, size)
+ return &buf
+ }
+
+ ptrBuf := (pools[i].Get().(*[]byte))
+ *ptrBuf = (*ptrBuf)[:size]
+
+ return ptrBuf
+}
+
+func getPoolIdx(size int) int {
+ size--
+ size >>= minPoolExpOf2
+ i := 0
+ for size > 0 {
+ size >>= 1
+ i++
+ }
+
+ return i
+}
+
+// Put returns buf to the pool.
+func Put(buf *[]byte) {
+ i := putPoolIdx(cap(*buf))
+ if i < 0 {
+ return
+ }
+
+ pools[i].Put(buf)
+}
+
+func putPoolIdx(size int) int {
+ minPoolSize := 1 << minPoolExpOf2
+ for i := range pools {
+		if size == minPoolSize<<i {
+			return i
+		}
+	}
+
+	return -1
+}
+ exit 1
+fi
+
+commits=("$@")
+benchmarks_dir=benchmarks
+
+if ! mkdir -p "${benchmarks_dir}"; then
+ echo "Unable to create dir for benchmarks data"
+ exit 1
+fi
+
+# Benchmark results
+bench_files=()
+
+# Run benchmark for each listed commit
+for i in "${!commits[@]}"; do
+ commit="${commits[i]}"
+ git checkout "$commit" || {
+ echo "Failed to checkout $commit"
+ exit 1
+ }
+
+ # Sanitized commit message
+ commit_message=$(git log -1 --pretty=format:"%s" | tr -c '[:alnum:]-_' '_')
+
+ # Benchmark data will go there
+ bench_file="${benchmarks_dir}/${i}_${commit_message}.bench"
+
+ if ! go test -bench=. -count=10 >"$bench_file"; then
+ echo "Benchmarking failed for commit $commit"
+ exit 1
+ fi
+
+ bench_files+=("$bench_file")
+done
+
+# go install golang.org/x/perf/cmd/benchstat[@latest]
+benchstat "${bench_files[@]}"
diff --git a/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go b/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
new file mode 100644
index 0000000..b516817
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
@@ -0,0 +1,460 @@
+package sanitize
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+// Part is either a string or an int. A string is raw SQL. An int is an
+// argument placeholder.
+type Part any
+
+type Query struct {
+ Parts []Part
+}
+
+// utf8.DecodeRune returns utf8.RuneError for errors. But that is actually rune U+FFFD -- the unicode replacement
+// character. utf8.RuneError is not an error if it is also width 3.
+//
+// https://github.com/jackc/pgx/issues/1380
+const replacementcharacterwidth = 3
+
+const maxBufSize = 16384 // 16 Ki
+
+var bufPool = &pool[*bytes.Buffer]{
+ new: func() *bytes.Buffer {
+ return &bytes.Buffer{}
+ },
+ reset: func(b *bytes.Buffer) bool {
+ n := b.Len()
+ b.Reset()
+ return n < maxBufSize
+ },
+}
+
+var null = []byte("null")
+
+func (q *Query) Sanitize(args ...any) (string, error) {
+ argUse := make([]bool, len(args))
+ buf := bufPool.get()
+ defer bufPool.put(buf)
+
+ for _, part := range q.Parts {
+ switch part := part.(type) {
+ case string:
+ buf.WriteString(part)
+ case int:
+ argIdx := part - 1
+ var p []byte
+ if argIdx < 0 {
+ return "", fmt.Errorf("first sql argument must be > 0")
+ }
+
+ if argIdx >= len(args) {
+ return "", fmt.Errorf("insufficient arguments")
+ }
+
+ // Prevent SQL injection via Line Comment Creation
+ // https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
+ buf.WriteByte(' ')
+
+ arg := args[argIdx]
+ switch arg := arg.(type) {
+ case nil:
+ p = null
+ case int64:
+ p = strconv.AppendInt(buf.AvailableBuffer(), arg, 10)
+ case float64:
+ p = strconv.AppendFloat(buf.AvailableBuffer(), arg, 'f', -1, 64)
+ case bool:
+ p = strconv.AppendBool(buf.AvailableBuffer(), arg)
+ case []byte:
+ p = QuoteBytes(buf.AvailableBuffer(), arg)
+ case string:
+ p = QuoteString(buf.AvailableBuffer(), arg)
+ case time.Time:
+ p = arg.Truncate(time.Microsecond).
+ AppendFormat(buf.AvailableBuffer(), "'2006-01-02 15:04:05.999999999Z07:00:00'")
+ default:
+ return "", fmt.Errorf("invalid arg type: %T", arg)
+ }
+ argUse[argIdx] = true
+
+ buf.Write(p)
+
+ // Prevent SQL injection via Line Comment Creation
+ // https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
+ buf.WriteByte(' ')
+ default:
+ return "", fmt.Errorf("invalid Part type: %T", part)
+ }
+ }
+
+ for i, used := range argUse {
+ if !used {
+ return "", fmt.Errorf("unused argument: %d", i)
+ }
+ }
+ return buf.String(), nil
+}
+
+func NewQuery(sql string) (*Query, error) {
+ query := &Query{}
+ query.init(sql)
+
+ return query, nil
+}
+
+var sqlLexerPool = &pool[*sqlLexer]{
+ new: func() *sqlLexer {
+ return &sqlLexer{}
+ },
+ reset: func(sl *sqlLexer) bool {
+ *sl = sqlLexer{}
+ return true
+ },
+}
+
+func (q *Query) init(sql string) {
+ parts := q.Parts[:0]
+ if parts == nil {
+ // dirty, but fast heuristic to preallocate for ~90% usecases
+ n := strings.Count(sql, "$") + strings.Count(sql, "--") + 1
+ parts = make([]Part, 0, n)
+ }
+
+ l := sqlLexerPool.get()
+ defer sqlLexerPool.put(l)
+
+ l.src = sql
+ l.stateFn = rawState
+ l.parts = parts
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ q.Parts = l.parts
+}
+
+func QuoteString(dst []byte, str string) []byte {
+ const quote = '\''
+
+ // Preallocate space for the worst case scenario
+ dst = slices.Grow(dst, len(str)*2+2)
+
+ // Add opening quote
+ dst = append(dst, quote)
+
+ // Iterate through the string without allocating
+ for i := 0; i < len(str); i++ {
+ if str[i] == quote {
+ dst = append(dst, quote, quote)
+ } else {
+ dst = append(dst, str[i])
+ }
+ }
+
+ // Add closing quote
+ dst = append(dst, quote)
+
+ return dst
+}
+
+func QuoteBytes(dst, buf []byte) []byte {
+ if len(buf) == 0 {
+ return append(dst, `'\x'`...)
+ }
+
+ // Calculate required length
+ requiredLen := 3 + hex.EncodedLen(len(buf)) + 1
+
+ // Ensure dst has enough capacity
+ if cap(dst)-len(dst) < requiredLen {
+ newDst := make([]byte, len(dst), len(dst)+requiredLen)
+ copy(newDst, dst)
+ dst = newDst
+ }
+
+ // Record original length and extend slice
+ origLen := len(dst)
+ dst = dst[:origLen+requiredLen]
+
+ // Add prefix
+ dst[origLen] = '\''
+ dst[origLen+1] = '\\'
+ dst[origLen+2] = 'x'
+
+ // Encode bytes directly into dst
+ hex.Encode(dst[origLen+3:len(dst)-1], buf)
+
+ // Add suffix
+ dst[len(dst)-1] = '\''
+
+ return dst
+}
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ nested int // multiline comment nesting level.
+ stateFn stateFn
+ parts []Part
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '$':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if '0' <= nextRune && nextRune <= '9' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return placeholderState
+ }
+ case '-':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '-' {
+ l.pos += width
+ return oneLineCommentState
+ }
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ return multilineCommentState
+ }
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+// placeholderState consumes a placeholder value. The $ must already have
+// been consumed. The first rune must be a digit.
+func placeholderState(l *sqlLexer) stateFn {
+ num := 0
+
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if '0' <= r && r <= '9' {
+ num *= 10
+ num += int(r - '0')
+ } else {
+ l.parts = append(l.parts, num)
+ l.pos -= width
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func oneLineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\n', '\r':
+ return rawState
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func multilineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ l.nested++
+ }
+ case '*':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '/' {
+ continue
+ }
+
+ l.pos += width
+ if l.nested == 0 {
+ return rawState
+ }
+ l.nested--
+
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+var queryPool = &pool[*Query]{
+ new: func() *Query {
+ return &Query{}
+ },
+ reset: func(q *Query) bool {
+ n := len(q.Parts)
+ q.Parts = q.Parts[:0]
+ return n < 64 // drop too large queries
+ },
+}
+
+// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
+// as necessary. This function is only safe when standard_conforming_strings is
+// on.
+func SanitizeSQL(sql string, args ...any) (string, error) {
+ query := queryPool.get()
+ query.init(sql)
+ defer queryPool.put(query)
+
+ return query.Sanitize(args...)
+}
+
+type pool[E any] struct {
+ p sync.Pool
+ new func() E
+ reset func(E) bool
+}
+
+func (pool *pool[E]) get() E {
+ v, ok := pool.p.Get().(E)
+ if !ok {
+ v = pool.new()
+ }
+
+ return v
+}
+
+func (p *pool[E]) put(v E) {
+ if p.reset(v) {
+ p.p.Put(v)
+ }
+}
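sanitize.go wraps sync.Pool in a small generic helper so that values are reset before reuse and oversized buffers or queries are dropped rather than returned to the pool. Below is a standalone sketch of the same pattern; it is illustrative only, since the internal package cannot be imported from outside pgx.

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// pool is a typed sync.Pool wrapper. reset prepares a value for reuse and
// reports whether it is still worth keeping; oversized values are dropped.
type pool[E any] struct {
	p     sync.Pool
	newFn func() E
	reset func(E) bool
}

func (pl *pool[E]) get() E {
	if v, ok := pl.p.Get().(E); ok {
		return v
	}
	return pl.newFn()
}

func (pl *pool[E]) put(v E) {
	if pl.reset(v) {
		pl.p.Put(v)
	}
}

func main() {
	const maxBufSize = 16384 // drop buffers that grew beyond 16 KiB

	bufPool := &pool[*bytes.Buffer]{
		newFn: func() *bytes.Buffer { return &bytes.Buffer{} },
		reset: func(b *bytes.Buffer) bool {
			n := b.Len()
			b.Reset()
			return n < maxBufSize
		},
	}

	buf := bufPool.get()
	buf.WriteString("select 1")
	fmt.Println(buf.String())
	bufPool.put(buf) // kept: still under maxBufSize
}
```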
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
new file mode 100644
index 0000000..dec83f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
@@ -0,0 +1,112 @@
+package stmtcache
+
+import (
+ "container/list"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// LRUCache implements Cache with a Least Recently Used (LRU) cache.
+type LRUCache struct {
+ cap int
+ m map[string]*list.Element
+ l *list.List
+ invalidStmts []*pgconn.StatementDescription
+}
+
+// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
+func NewLRUCache(cap int) *LRUCache {
+ return &LRUCache{
+ cap: cap,
+ m: make(map[string]*list.Element),
+ l: list.New(),
+ }
+}
+
+// Get returns the statement description for sql. Returns nil if not found.
+func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
+ if el, ok := c.m[key]; ok {
+ c.l.MoveToFront(el)
+ return el.Value.(*pgconn.StatementDescription)
+ }
+
+ return nil
+
+}
+
+// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
+// sd.SQL has been invalidated and RemoveInvalidated has not been called yet.
+func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
+ if sd.SQL == "" {
+ panic("cannot store statement description with empty SQL")
+ }
+
+ if _, present := c.m[sd.SQL]; present {
+ return
+ }
+
+ // The statement may have been invalidated but not yet handled. Do not readd it to the cache.
+ for _, invalidSD := range c.invalidStmts {
+ if invalidSD.SQL == sd.SQL {
+ return
+ }
+ }
+
+ if c.l.Len() == c.cap {
+ c.invalidateOldest()
+ }
+
+ el := c.l.PushFront(sd)
+ c.m[sd.SQL] = el
+}
+
+// Invalidate invalidates statement description identified by sql. Does nothing if not found.
+func (c *LRUCache) Invalidate(sql string) {
+ if el, ok := c.m[sql]; ok {
+ delete(c.m, sql)
+ c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
+ c.l.Remove(el)
+ }
+}
+
+// InvalidateAll invalidates all statement descriptions.
+func (c *LRUCache) InvalidateAll() {
+ el := c.l.Front()
+ for el != nil {
+ c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
+ el = el.Next()
+ }
+
+ c.m = make(map[string]*list.Element)
+ c.l = list.New()
+}
+
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
+ return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+// never seen by the call to GetInvalidated.
+func (c *LRUCache) RemoveInvalidated() {
+ c.invalidStmts = nil
+}
+
+// Len returns the number of cached prepared statement descriptions.
+func (c *LRUCache) Len() int {
+ return c.l.Len()
+}
+
+// Cap returns the maximum number of cached prepared statement descriptions.
+func (c *LRUCache) Cap() int {
+ return c.cap
+}
+
+func (c *LRUCache) invalidateOldest() {
+ oldest := c.l.Back()
+ sd := oldest.Value.(*pgconn.StatementDescription)
+ c.invalidStmts = append(c.invalidStmts, sd)
+ delete(c.m, sd.SQL)
+ c.l.Remove(oldest)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
new file mode 100644
index 0000000..d57bdd2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
@@ -0,0 +1,45 @@
+// Package stmtcache is a cache for statement descriptions.
+package stmtcache
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// StatementName returns a statement name that will be stable for sql across multiple connections and program
+// executions.
+func StatementName(sql string) string {
+ digest := sha256.Sum256([]byte(sql))
+ return "stmtcache_" + hex.EncodeToString(digest[0:24])
+}
+
+// Cache caches statement descriptions.
+type Cache interface {
+ // Get returns the statement description for sql. Returns nil if not found.
+ Get(sql string) *pgconn.StatementDescription
+
+ // Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
+ Put(sd *pgconn.StatementDescription)
+
+ // Invalidate invalidates statement description identified by sql. Does nothing if not found.
+ Invalidate(sql string)
+
+ // InvalidateAll invalidates all statement descriptions.
+ InvalidateAll()
+
+ // GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+ GetInvalidated() []*pgconn.StatementDescription
+
+ // RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+ // call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+ // never seen by the call to GetInvalidated.
+ RemoveInvalidated()
+
+ // Len returns the number of cached prepared statement descriptions.
+ Len() int
+
+ // Cap returns the maximum number of cached prepared statement descriptions.
+ Cap() int
+}
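StatementName derives the server-side prepared statement name from a SHA-256 digest of the SQL, so identical query text maps to the same name across connections and program runs. A small self-contained sketch of the same derivation:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// statementName mirrors stmtcache.StatementName: hash the SQL and use the
// first 24 bytes of the digest, hex encoded, as a stable statement name.
func statementName(sql string) string {
	digest := sha256.Sum256([]byte(sql))
	return "stmtcache_" + hex.EncodeToString(digest[0:24])
}

func main() {
	sql := "select id, email from users where id = $1"
	// The same SQL yields the same name in every process and connection.
	fmt.Println(statementName(sql))
	fmt.Println(statementName(sql) == statementName(sql)) // true
}
```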
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
new file mode 100644
index 0000000..6964132
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
@@ -0,0 +1,77 @@
+package stmtcache
+
+import (
+ "math"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// UnlimitedCache implements Cache with no capacity limit.
+type UnlimitedCache struct {
+ m map[string]*pgconn.StatementDescription
+ invalidStmts []*pgconn.StatementDescription
+}
+
+// NewUnlimitedCache creates a new UnlimitedCache.
+func NewUnlimitedCache() *UnlimitedCache {
+ return &UnlimitedCache{
+ m: make(map[string]*pgconn.StatementDescription),
+ }
+}
+
+// Get returns the statement description for sql. Returns nil if not found.
+func (c *UnlimitedCache) Get(sql string) *pgconn.StatementDescription {
+ return c.m[sql]
+}
+
+// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
+func (c *UnlimitedCache) Put(sd *pgconn.StatementDescription) {
+ if sd.SQL == "" {
+ panic("cannot store statement description with empty SQL")
+ }
+
+ if _, present := c.m[sd.SQL]; present {
+ return
+ }
+
+ c.m[sd.SQL] = sd
+}
+
+// Invalidate invalidates statement description identified by sql. Does nothing if not found.
+func (c *UnlimitedCache) Invalidate(sql string) {
+ if sd, ok := c.m[sql]; ok {
+ delete(c.m, sql)
+ c.invalidStmts = append(c.invalidStmts, sd)
+ }
+}
+
+// InvalidateAll invalidates all statement descriptions.
+func (c *UnlimitedCache) InvalidateAll() {
+ for _, sd := range c.m {
+ c.invalidStmts = append(c.invalidStmts, sd)
+ }
+
+ c.m = make(map[string]*pgconn.StatementDescription)
+}
+
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
+ return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+// never seen by the call to GetInvalidated.
+func (c *UnlimitedCache) RemoveInvalidated() {
+ c.invalidStmts = nil
+}
+
+// Len returns the number of cached prepared statement descriptions.
+func (c *UnlimitedCache) Len() int {
+ return len(c.m)
+}
+
+// Cap returns the maximum number of cached prepared statement descriptions.
+func (c *UnlimitedCache) Cap() int {
+ return math.MaxInt
+}
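The statement and description caches are sized through pgx's connection configuration rather than used directly. A hedged sketch of adjusting the capacities (field names as in pgx v5's ConnConfig), assuming DATABASE_URL points at a reachable server:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Assumes DATABASE_URL points at a reachable PostgreSQL server.
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}

	// Maximum number of cached prepared statement and statement descriptions.
	cfg.StatementCacheCapacity = 128
	cfg.DescriptionCacheCapacity = 128

	conn, err := pgx.ConnectConfig(ctx, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	log.Println("connected with 128-entry statement and description caches")
}
```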
diff --git a/vendor/github.com/jackc/pgx/v5/large_objects.go b/vendor/github.com/jackc/pgx/v5/large_objects.go
new file mode 100644
index 0000000..9d21afd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/large_objects.go
@@ -0,0 +1,161 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
+// PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data
+// in the message, maxLargeObjectMessageLength should be no larger than 1 GB - 1 KB.
+var maxLargeObjectMessageLength = 1024*1024*1024 - 1024
+
+// LargeObjects is a structure used to access the large objects API. It is only valid within the transaction where it
+// was created.
+//
+// For more details see: http://www.postgresql.org/docs/current/static/largeobjects.html
+type LargeObjects struct {
+ tx Tx
+}
+
+type LargeObjectMode int32
+
+const (
+ LargeObjectModeWrite LargeObjectMode = 0x20000
+ LargeObjectModeRead LargeObjectMode = 0x40000
+)
+
+// Create creates a new large object. If oid is zero, the server assigns an unused OID.
+func (o *LargeObjects) Create(ctx context.Context, oid uint32) (uint32, error) {
+ err := o.tx.QueryRow(ctx, "select lo_create($1)", oid).Scan(&oid)
+ return oid, err
+}
+
+// Open opens an existing large object with the given mode. ctx will also be used for all operations on the opened large
+// object.
+func (o *LargeObjects) Open(ctx context.Context, oid uint32, mode LargeObjectMode) (*LargeObject, error) {
+ var fd int32
+ err := o.tx.QueryRow(ctx, "select lo_open($1, $2)", oid, mode).Scan(&fd)
+ if err != nil {
+ return nil, err
+ }
+ return &LargeObject{fd: fd, tx: o.tx, ctx: ctx}, nil
+}
+
+// Unlink removes a large object from the database.
+func (o *LargeObjects) Unlink(ctx context.Context, oid uint32) error {
+ var result int32
+ err := o.tx.QueryRow(ctx, "select lo_unlink($1)", oid).Scan(&result)
+ if err != nil {
+ return err
+ }
+
+ if result != 1 {
+ return errors.New("failed to remove large object")
+ }
+
+ return nil
+}
+
+// A LargeObject is a large object stored on the server. It is only valid within the transaction that it was initialized
+// in. It uses the context it was initialized with for all operations. It implements these interfaces:
+//
+// io.Writer
+// io.Reader
+// io.Seeker
+// io.Closer
+type LargeObject struct {
+ ctx context.Context
+ tx Tx
+ fd int32
+}
+
+// Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
+func (o *LargeObject) Write(p []byte) (int, error) {
+ nTotal := 0
+ for {
+ expected := len(p) - nTotal
+ if expected == 0 {
+ break
+ } else if expected > maxLargeObjectMessageLength {
+ expected = maxLargeObjectMessageLength
+ }
+
+ var n int
+ err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p[nTotal:nTotal+expected]).Scan(&n)
+ if err != nil {
+ return nTotal, err
+ }
+
+ if n < 0 {
+ return nTotal, errors.New("failed to write to large object")
+ }
+
+ nTotal += n
+
+ if n < expected {
+ return nTotal, errors.New("short write to large object")
+ } else if n > expected {
+ return nTotal, errors.New("invalid write to large object")
+ }
+ }
+
+ return nTotal, nil
+}
+
+// Read reads up to len(p) bytes into p returning the number of bytes read.
+func (o *LargeObject) Read(p []byte) (int, error) {
+ nTotal := 0
+ for {
+ expected := len(p) - nTotal
+ if expected == 0 {
+ break
+ } else if expected > maxLargeObjectMessageLength {
+ expected = maxLargeObjectMessageLength
+ }
+
+ res := pgtype.PreallocBytes(p[nTotal:])
+ err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
+ // We compute expected so that it always fits into p, so it should never happen
+ // that PreallocBytes's ScanBytes had to allocate a new slice.
+ nTotal += len(res)
+ if err != nil {
+ return nTotal, err
+ }
+
+ if len(res) < expected {
+ return nTotal, io.EOF
+ } else if len(res) > expected {
+ return nTotal, errors.New("invalid read of large object")
+ }
+ }
+
+ return nTotal, nil
+}
+
+// Seek moves the current location pointer to the new location specified by offset.
+func (o *LargeObject) Seek(offset int64, whence int) (n int64, err error) {
+ err = o.tx.QueryRow(o.ctx, "select lo_lseek64($1, $2, $3)", o.fd, offset, whence).Scan(&n)
+ return n, err
+}
+
+// Tell returns the current read or write location of the large object descriptor.
+func (o *LargeObject) Tell() (n int64, err error) {
+ err = o.tx.QueryRow(o.ctx, "select lo_tell64($1)", o.fd).Scan(&n)
+ return n, err
+}
+
+// Truncate the large object to size.
+func (o *LargeObject) Truncate(size int64) (err error) {
+ _, err = o.tx.Exec(o.ctx, "select lo_truncate64($1, $2)", o.fd, size)
+ return err
+}
+
+// Close the large object descriptor.
+func (o *LargeObject) Close() error {
+ _, err := o.tx.Exec(o.ctx, "select lo_close($1)", o.fd)
+ return err
+}
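LargeObjects is only usable inside the transaction that created it, and every operation runs on the context passed when the object was opened. A hedged end-to-end sketch using the public pgx API (assumes DATABASE_URL points at a reachable server):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Assumes DATABASE_URL points at a reachable PostgreSQL server.
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Large objects can only be used inside a transaction.
	tx, err := conn.Begin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback(ctx)

	lo := tx.LargeObjects()

	// Let the server assign an unused OID, then open it for writing.
	oid, err := lo.Create(ctx, 0)
	if err != nil {
		log.Fatal(err)
	}

	obj, err := lo.Open(ctx, oid, pgx.LargeObjectModeWrite)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := obj.Write([]byte("hello, large object")); err != nil {
		log.Fatal(err)
	}
	if err := obj.Close(); err != nil {
		log.Fatal(err)
	}

	if err := tx.Commit(ctx); err != nil {
		log.Fatal(err)
	}
	log.Printf("stored large object with oid %d", oid)
}
```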
diff --git a/vendor/github.com/jackc/pgx/v5/named_args.go b/vendor/github.com/jackc/pgx/v5/named_args.go
new file mode 100644
index 0000000..c88991e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/named_args.go
@@ -0,0 +1,295 @@
+package pgx
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// NamedArgs can be used as the first argument to a query method. It will replace every '@' named placeholder with a '$'
+// ordinal placeholder and construct the appropriate arguments.
+//
+// For example, the following two queries are equivalent:
+//
+// conn.Query(ctx, "select * from widgets where foo = @foo and bar = @bar", pgx.NamedArgs{"foo": 1, "bar": 2})
+// conn.Query(ctx, "select * from widgets where foo = $1 and bar = $2", 1, 2)
+//
+// Named placeholders are case sensitive and must start with a letter or underscore. Subsequent characters can be
+// letters, numbers, or underscores.
+type NamedArgs map[string]any
+
+// RewriteQuery implements the QueryRewriter interface.
+func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
+ return rewriteQuery(na, sql, false)
+}
+
+// StrictNamedArgs can be used in the same way as NamedArgs, but provided arguments are also checked to include all
+// named arguments that the sql query uses, and no extra arguments.
+type StrictNamedArgs map[string]any
+
+// RewriteQuery implements the QueryRewriter interface.
+func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
+ return rewriteQuery(sna, sql, true)
+}
+
+type namedArg string
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ nested int // multiline comment nesting level.
+ stateFn stateFn
+ parts []any
+
+ nameToOrdinal map[namedArg]int
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) {
+ l := &sqlLexer{
+ src: sql,
+ stateFn: rawState,
+ nameToOrdinal: make(map[namedArg]int, len(na)),
+ }
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ sb := strings.Builder{}
+ for _, p := range l.parts {
+ switch p := p.(type) {
+ case string:
+ sb.WriteString(p)
+ case namedArg:
+ sb.WriteRune('$')
+ sb.WriteString(strconv.Itoa(l.nameToOrdinal[p]))
+ }
+ }
+
+ newArgs = make([]any, len(l.nameToOrdinal))
+ for name, ordinal := range l.nameToOrdinal {
+ var found bool
+ newArgs[ordinal-1], found = na[string(name)]
+ if isStrict && !found {
+ return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
+ }
+ }
+
+ if isStrict {
+ for name := range na {
+ if _, found := l.nameToOrdinal[namedArg(name)]; !found {
+ return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name)
+ }
+ }
+ }
+
+ return sb.String(), newArgs, nil
+}
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '@':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if isLetter(nextRune) || nextRune == '_' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return namedArgState
+ }
+ case '-':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '-' {
+ l.pos += width
+ return oneLineCommentState
+ }
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ return multilineCommentState
+ }
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func isLetter(r rune) bool {
+ return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
+}
+
+func namedArgState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if r == utf8.RuneError {
+ if l.pos-l.start > 0 {
+ na := namedArg(l.src[l.start:l.pos])
+ if _, found := l.nameToOrdinal[na]; !found {
+ l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
+ }
+ l.parts = append(l.parts, na)
+ l.start = l.pos
+ }
+ return nil
+ } else if !(isLetter(r) || (r >= '0' && r <= '9') || r == '_') {
+ l.pos -= width
+ na := namedArg(l.src[l.start:l.pos])
+ if _, found := l.nameToOrdinal[na]; !found {
+ l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
+ }
+ l.parts = append(l.parts, namedArg(na))
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func oneLineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\n', '\r':
+ return rawState
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func multilineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ l.nested++
+ }
+ case '*':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '/' {
+ continue
+ }
+
+ l.pos += width
+ if l.nested == 0 {
+ return rawState
+ }
+ l.nested--
+
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
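StrictNamedArgs rewrites @name placeholders exactly like NamedArgs, but additionally errors out when the supplied map and the query's placeholders do not match one-to-one. A hedged usage sketch against a hypothetical users table:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// Assumes DATABASE_URL points at a reachable PostgreSQL server.
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Every @name in the query must be present in the map, and every map key
	// must appear in the query, otherwise Query returns an error.
	rows, err := conn.Query(ctx,
		"select id, email from users where org = @org and active = @active",
		pgx.StrictNamedArgs{"org": "acme", "active": true},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var id int64
		var email string
		if err := rows.Scan(&id, &email); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, email)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```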
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/README.md b/vendor/github.com/jackc/pgx/v5/pgconn/README.md
new file mode 100644
index 0000000..1fe15c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/README.md
@@ -0,0 +1,29 @@
+# pgconn
+
+Package pgconn is a low-level PostgreSQL database driver. It operates at nearly the same level as the C library libpq.
+It is primarily intended to serve as the foundation for higher level libraries such as https://github.com/jackc/pgx.
+Applications should handle normal queries with a higher level library and only use pgconn directly when required for
+low-level access to PostgreSQL functionality.
+
+## Example Usage
+
+```go
+pgConn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+if err != nil {
+ log.Fatalln("pgconn failed to connect:", err)
+}
+defer pgConn.Close(context.Background())
+
+result := pgConn.ExecParams(context.Background(), "SELECT email FROM users WHERE id=$1", [][]byte{[]byte("123")}, nil, nil, nil)
+for result.NextRow() {
+ fmt.Println("User 123 has email:", string(result.Values()[0]))
+}
+_, err = result.Close()
+if err != nil {
+ log.Fatalln("failed reading result:", err)
+}
+```
+
+## Testing
+
+See CONTRIBUTING.md for setup instructions.
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go b/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go
new file mode 100644
index 0000000..0649836
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go
@@ -0,0 +1,272 @@
+// SCRAM-SHA-256 authentication
+//
+// Resources:
+// https://tools.ietf.org/html/rfc5802
+// https://tools.ietf.org/html/rfc8265
+// https://www.postgresql.org/docs/current/sasl-authentication.html
+//
+// Inspiration drawn from other implementations:
+// https://github.com/lib/pq/pull/608
+// https://github.com/lib/pq/pull/788
+// https://github.com/lib/pq/pull/833
+
+package pgconn
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/pgproto3"
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/text/secure/precis"
+)
+
+const clientNonceLen = 18
+
+// Perform SCRAM authentication.
+func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
+ sc, err := newScramClient(serverAuthMechanisms, c.config.Password)
+ if err != nil {
+ return err
+ }
+
+ // Send client-first-message in a SASLInitialResponse
+ saslInitialResponse := &pgproto3.SASLInitialResponse{
+ AuthMechanism: "SCRAM-SHA-256",
+ Data: sc.clientFirstMessage(),
+ }
+ c.frontend.Send(saslInitialResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+
+ // Receive server-first-message payload in an AuthenticationSASLContinue.
+ saslContinue, err := c.rxSASLContinue()
+ if err != nil {
+ return err
+ }
+ err = sc.recvServerFirstMessage(saslContinue.Data)
+ if err != nil {
+ return err
+ }
+
+ // Send client-final-message in a SASLResponse
+ saslResponse := &pgproto3.SASLResponse{
+ Data: []byte(sc.clientFinalMessage()),
+ }
+ c.frontend.Send(saslResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+
+ // Receive server-final-message payload in an AuthenticationSASLFinal.
+ saslFinal, err := c.rxSASLFinal()
+ if err != nil {
+ return err
+ }
+ return sc.recvServerFinalMessage(saslFinal.Data)
+}
+
+func (c *PgConn) rxSASLContinue() (*pgproto3.AuthenticationSASLContinue, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationSASLContinue:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationSASLContinue message but received unexpected message %T", msg)
+}
+
+func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationSASLFinal:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationSASLFinal message but received unexpected message %T", msg)
+}
+
+type scramClient struct {
+ serverAuthMechanisms []string
+ password []byte
+ clientNonce []byte
+
+ clientFirstMessageBare []byte
+
+ serverFirstMessage []byte
+ clientAndServerNonce []byte
+ salt []byte
+ iterations int
+
+ saltedPassword []byte
+ authMessage []byte
+}
+
+func newScramClient(serverAuthMechanisms []string, password string) (*scramClient, error) {
+ sc := &scramClient{
+ serverAuthMechanisms: serverAuthMechanisms,
+ }
+
+ // Ensure server supports SCRAM-SHA-256
+ hasScramSHA256 := false
+ for _, mech := range sc.serverAuthMechanisms {
+ if mech == "SCRAM-SHA-256" {
+ hasScramSHA256 = true
+ break
+ }
+ }
+ if !hasScramSHA256 {
+ return nil, errors.New("server does not support SCRAM-SHA-256")
+ }
+
+ // precis.OpaqueString is equivalent to SASLprep for password.
+ var err error
+ sc.password, err = precis.OpaqueString.Bytes([]byte(password))
+ if err != nil {
+ // PostgreSQL allows passwords invalid according to SCRAM / SASLprep.
+ sc.password = []byte(password)
+ }
+
+ buf := make([]byte, clientNonceLen)
+ _, err = rand.Read(buf)
+ if err != nil {
+ return nil, err
+ }
+ sc.clientNonce = make([]byte, base64.RawStdEncoding.EncodedLen(len(buf)))
+ base64.RawStdEncoding.Encode(sc.clientNonce, buf)
+
+ return sc, nil
+}
+
+func (sc *scramClient) clientFirstMessage() []byte {
+ sc.clientFirstMessageBare = []byte(fmt.Sprintf("n=,r=%s", sc.clientNonce))
+ return []byte(fmt.Sprintf("n,,%s", sc.clientFirstMessageBare))
+}
+
+func (sc *scramClient) recvServerFirstMessage(serverFirstMessage []byte) error {
+ sc.serverFirstMessage = serverFirstMessage
+ buf := serverFirstMessage
+ if !bytes.HasPrefix(buf, []byte("r=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include r=")
+ }
+ buf = buf[2:]
+
+ idx := bytes.IndexByte(buf, ',')
+ if idx == -1 {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
+ }
+ sc.clientAndServerNonce = buf[:idx]
+ buf = buf[idx+1:]
+
+ if !bytes.HasPrefix(buf, []byte("s=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
+ }
+ buf = buf[2:]
+
+ idx = bytes.IndexByte(buf, ',')
+ if idx == -1 {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
+ }
+ saltStr := buf[:idx]
+ buf = buf[idx+1:]
+
+ if !bytes.HasPrefix(buf, []byte("i=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
+ }
+ buf = buf[2:]
+ iterationsStr := buf
+
+ var err error
+ sc.salt, err = base64.StdEncoding.DecodeString(string(saltStr))
+ if err != nil {
+ return fmt.Errorf("invalid SCRAM salt received from server: %w", err)
+ }
+
+ sc.iterations, err = strconv.Atoi(string(iterationsStr))
+ if err != nil || sc.iterations <= 0 {
+ return fmt.Errorf("invalid SCRAM iteration count received from server: %w", err)
+ }
+
+ if !bytes.HasPrefix(sc.clientAndServerNonce, sc.clientNonce) {
+ return errors.New("invalid SCRAM nonce: did not start with client nonce")
+ }
+
+ if len(sc.clientAndServerNonce) <= len(sc.clientNonce) {
+ return errors.New("invalid SCRAM nonce: did not include server nonce")
+ }
+
+ return nil
+}
+
+func (sc *scramClient) clientFinalMessage() string {
+ clientFinalMessageWithoutProof := []byte(fmt.Sprintf("c=biws,r=%s", sc.clientAndServerNonce))
+
+ sc.saltedPassword = pbkdf2.Key([]byte(sc.password), sc.salt, sc.iterations, 32, sha256.New)
+ sc.authMessage = bytes.Join([][]byte{sc.clientFirstMessageBare, sc.serverFirstMessage, clientFinalMessageWithoutProof}, []byte(","))
+
+ clientProof := computeClientProof(sc.saltedPassword, sc.authMessage)
+
+ return fmt.Sprintf("%s,p=%s", clientFinalMessageWithoutProof, clientProof)
+}
+
+func (sc *scramClient) recvServerFinalMessage(serverFinalMessage []byte) error {
+ if !bytes.HasPrefix(serverFinalMessage, []byte("v=")) {
+ return errors.New("invalid SCRAM server-final-message received from server")
+ }
+
+ serverSignature := serverFinalMessage[2:]
+
+ if !hmac.Equal(serverSignature, computeServerSignature(sc.saltedPassword, sc.authMessage)) {
+ return errors.New("invalid SCRAM ServerSignature received from server")
+ }
+
+ return nil
+}
+
+func computeHMAC(key, msg []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ mac.Write(msg)
+ return mac.Sum(nil)
+}
+
+func computeClientProof(saltedPassword, authMessage []byte) []byte {
+ clientKey := computeHMAC(saltedPassword, []byte("Client Key"))
+ storedKey := sha256.Sum256(clientKey)
+ clientSignature := computeHMAC(storedKey[:], authMessage)
+
+ clientProof := make([]byte, len(clientSignature))
+ for i := 0; i < len(clientSignature); i++ {
+ clientProof[i] = clientKey[i] ^ clientSignature[i]
+ }
+
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(clientProof)))
+ base64.StdEncoding.Encode(buf, clientProof)
+ return buf
+}
+
+func computeServerSignature(saltedPassword []byte, authMessage []byte) []byte {
+ serverKey := computeHMAC(saltedPassword, []byte("Server Key"))
+ serverSignature := computeHMAC(serverKey, authMessage)
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(serverSignature)))
+ base64.StdEncoding.Encode(buf, serverSignature)
+ return buf
+}
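The client proof sent in the final SCRAM message is ClientKey XOR HMAC(H(ClientKey), AuthMessage), and the server proves knowledge of the password with HMAC(ServerKey, AuthMessage), which the client compares against the v= value. The following sketch reproduces those two derivations with placeholder inputs; it is not a complete SCRAM exchange.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func hmacSHA256(key, msg []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(msg)
	return mac.Sum(nil)
}

func main() {
	// Placeholder inputs; a real exchange uses the server-provided salt and
	// iteration count and the concatenated client and server nonces.
	password := []byte("secret")
	salt := []byte("0123456789abcdef")
	iterations := 4096
	authMessage := []byte("n=,r=cnonce,r=cnoncesnonce,s=c2FsdA==,i=4096,c=biws,r=cnoncesnonce")

	saltedPassword := pbkdf2.Key(password, salt, iterations, 32, sha256.New)

	// ClientProof = ClientKey XOR HMAC(H(ClientKey), AuthMessage)
	clientKey := hmacSHA256(saltedPassword, []byte("Client Key"))
	storedKey := sha256.Sum256(clientKey)
	clientSignature := hmacSHA256(storedKey[:], authMessage)
	clientProof := make([]byte, len(clientKey))
	for i := range clientProof {
		clientProof[i] = clientKey[i] ^ clientSignature[i]
	}

	// ServerSignature = HMAC(ServerKey, AuthMessage); the client compares this
	// with the v= value from the server-final-message.
	serverKey := hmacSHA256(saltedPassword, []byte("Server Key"))
	serverSignature := hmacSHA256(serverKey, authMessage)

	fmt.Println("p=" + base64.StdEncoding.EncodeToString(clientProof))
	fmt.Println("v=" + base64.StdEncoding.EncodeToString(serverSignature))
}
```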
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/config.go b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
new file mode 100644
index 0000000..1c28c40
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
@@ -0,0 +1,951 @@
+package pgconn
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgpassfile"
+ "github.com/jackc/pgservicefile"
+ "github.com/jackc/pgx/v5/pgconn/ctxwatch"
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+type AfterConnectFunc func(ctx context.Context, pgconn *PgConn) error
+type ValidateConnectFunc func(ctx context.Context, pgconn *PgConn) error
+type GetSSLPasswordFunc func(ctx context.Context) string
+
+// Config is the settings used to establish a connection to a PostgreSQL server. It must be created by [ParseConfig]. A
+// manually initialized Config will cause ConnectConfig to panic.
+type Config struct {
+ Host string // host (e.g. localhost) or absolute path to unix domain socket directory (e.g. /private/tmp)
+ Port uint16
+ Database string
+ User string
+ Password string
+ TLSConfig *tls.Config // nil disables TLS
+ ConnectTimeout time.Duration
+ DialFunc DialFunc // e.g. net.Dialer.DialContext
+ LookupFunc LookupFunc // e.g. net.Resolver.LookupHost
+ BuildFrontend BuildFrontendFunc
+
+ // BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called
+ // when a context passed to a PgConn method is canceled.
+ BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler
+
+ RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+
+ KerberosSrvName string
+ KerberosSpn string
+ Fallbacks []*FallbackConfig
+
+ SSLNegotiation string // sslnegotiation=postgres or sslnegotiation=direct
+
+ // ValidateConnect is called during a connection attempt after a successful authentication with the PostgreSQL server.
+ // It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next
+ // fallback config is tried. This allows implementing high availability behavior such as libpq does with target_session_attrs.
+ ValidateConnect ValidateConnectFunc
+
+ // AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables
+ // or prepare statements). If this returns an error the connection attempt fails.
+ AfterConnect AfterConnectFunc
+
+ // OnNotice is a callback function called when a notice response is received.
+ OnNotice NoticeHandler
+
+ // OnNotification is a callback function called when a notification from the LISTEN/NOTIFY system is received.
+ OnNotification NotificationHandler
+
+	// OnPgError is a callback function called when a Postgres error is received from the server. The default handler will close
+ // the connection on any FATAL errors. If you override this handler you should call the previously set handler or ensure
+ // that you close on FATAL errors by returning false.
+ OnPgError PgErrorHandler
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
+type ParseConfigOptions struct {
+ // GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function
+ // PQsetSSLKeyPassHook_OpenSSL.
+ GetSSLPassword GetSSLPasswordFunc
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the TLSConfig field:
+// according to the tls.Config docs it must not be modified after creation.
+func (c *Config) Copy() *Config {
+ newConf := new(Config)
+ *newConf = *c
+ if newConf.TLSConfig != nil {
+ newConf.TLSConfig = c.TLSConfig.Clone()
+ }
+ if newConf.RuntimeParams != nil {
+ newConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))
+ for k, v := range c.RuntimeParams {
+ newConf.RuntimeParams[k] = v
+ }
+ }
+ if newConf.Fallbacks != nil {
+ newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
+ for i, fallback := range c.Fallbacks {
+ newFallback := new(FallbackConfig)
+ *newFallback = *fallback
+ if newFallback.TLSConfig != nil {
+ newFallback.TLSConfig = fallback.TLSConfig.Clone()
+ }
+ newConf.Fallbacks[i] = newFallback
+ }
+ }
+ return newConf
+}
+
+// FallbackConfig is additional settings to attempt a connection with when the primary Config fails to establish a
+// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.
+type FallbackConfig struct {
+ Host string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp)
+ Port uint16
+ TLSConfig *tls.Config // nil disables TLS
+}
+
+// connectOneConfig is the configuration for a single attempt to connect to a single host.
+type connectOneConfig struct {
+ network string
+ address string
+ originalHostname string // original hostname before resolving
+ tlsConfig *tls.Config // nil disables TLS
+}
+
+// isAbsolutePath checks if the provided value is an absolute path either
+// beginning with a forward slash (as on Linux-based systems) or with a capital
+// letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows).
+func isAbsolutePath(path string) bool {
+ isWindowsPath := func(p string) bool {
+ if len(p) < 3 {
+ return false
+ }
+ drive := p[0]
+ colon := p[1]
+ backslash := p[2]
+ if drive >= 'A' && drive <= 'Z' && colon == ':' && backslash == '\\' {
+ return true
+ }
+ return false
+ }
+ return strings.HasPrefix(path, "/") || isWindowsPath(path)
+}
+
+// NetworkAddress converts a PostgreSQL host and port into network and address suitable for use with
+// net.Dial.
+func NetworkAddress(host string, port uint16) (network, address string) {
+ if isAbsolutePath(host) {
+ network = "unix"
+ address = filepath.Join(host, ".s.PGSQL.") + strconv.FormatInt(int64(port), 10)
+ } else {
+ network = "tcp"
+ address = net.JoinHostPort(host, strconv.Itoa(int(port)))
+ }
+ return network, address
+}
+
+// ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It
+// uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely
+// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See
+// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty
+// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
+//
+// # Example Keyword/Value
+// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca
+//
+// # Example URL
+// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca
+//
+// The returned *Config may be modified. However, it is strongly recommended that any configuration that can be done
+// through the connection string be done there. In particular the fields Host, Port, TLSConfig, and Fallbacks can be
+// interdependent (e.g. TLSConfig needs knowledge of the host to validate the server certificate). These fields should
+// not be modified individually. They should all be modified or all left unchanged.
+//
+// ParseConfig supports specifying multiple hosts in similar manner to libpq. Host and port may include comma separated
+// values that will be tried in order. This can be used as part of a high availability system. See
+// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS for more information.
+//
+// # Example URL
+// postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb
+//
+// ParseConfig currently recognizes the following environment variable and their parameter key word equivalents passed
+// via database URL or keyword/value:
+//
+// PGHOST
+// PGPORT
+// PGDATABASE
+// PGUSER
+// PGPASSWORD
+// PGPASSFILE
+// PGSERVICE
+// PGSERVICEFILE
+// PGSSLMODE
+// PGSSLCERT
+// PGSSLKEY
+// PGSSLROOTCERT
+// PGSSLPASSWORD
+// PGOPTIONS
+// PGAPPNAME
+// PGCONNECT_TIMEOUT
+// PGTARGETSESSIONATTRS
+// PGTZ
+//
+// See http://www.postgresql.org/docs/11/static/libpq-envars.html for details on the meaning of environment variables.
+//
+// See https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-PARAMKEYWORDS for parameter key word names. They are
+// usually but not always the environment variable name downcased and without the "PG" prefix.
+//
+// Important Security Notes:
+//
+// ParseConfig tries to match libpq behavior with regard to PGSSLMODE. This includes defaulting to "prefer" behavior if
+// not set.
+//
+// See http://www.postgresql.org/docs/11/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of
+// security each sslmode provides.
+//
+// The sslmode "prefer" (the default), sslmode "allow", and multiple hosts are implemented via the Fallbacks field of
+// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of
+// sslmode "prefer" this means it will first try the main Config settings which use TLS, then it will try the fallback
+// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually
+// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting
+// TLSConfig.
+//
+// Other known differences with libpq:
+//
+// When multiple hosts are specified, libpq allows them to have different passwords set via the .pgpass file. pgconn
+// does not.
+//
+// In addition, ParseConfig accepts the following options:
+//
+// - servicefile.
+// libpq only reads servicefile from the PGSERVICEFILE environment variable. ParseConfig accepts servicefile as a
+// part of the connection string.
+func ParseConfig(connString string) (*Config, error) {
+ var parseConfigOptions ParseConfigOptions
+ return ParseConfigWithOptions(connString, parseConfigOptions)
+}
+
+// ParseConfigWithOptions builds a *Config from connString and options with similar behavior to the PostgreSQL standard
+// C library libpq. options contains settings that cannot be specified in a connString such as providing a function to
+// get the SSL password.
+func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Config, error) {
+ defaultSettings := defaultSettings()
+ envSettings := parseEnvSettings()
+
+ connStringSettings := make(map[string]string)
+ if connString != "" {
+ var err error
+ // connString may be a database URL or in PostgreSQL keyword/value format
+ if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
+ connStringSettings, err = parseURLSettings(connString)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err}
+ }
+ } else {
+ connStringSettings, err = parseKeywordValueSettings(connString)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err}
+ }
+ }
+ }
+
+ settings := mergeSettings(defaultSettings, envSettings, connStringSettings)
+ if service, present := settings["service"]; present {
+ serviceSettings, err := parseServiceSettings(settings["servicefile"], service)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to read service", err: err}
+ }
+
+ settings = mergeSettings(defaultSettings, envSettings, serviceSettings, connStringSettings)
+ }
+
+ config := &Config{
+ createdByParseConfig: true,
+ Database: settings["database"],
+ User: settings["user"],
+ Password: settings["password"],
+ RuntimeParams: make(map[string]string),
+ BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
+ return pgproto3.NewFrontend(r, w)
+ },
+ BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
+ return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
+ },
+ OnPgError: func(_ *PgConn, pgErr *PgError) bool {
+ // we want to automatically close any fatal errors
+ if strings.EqualFold(pgErr.Severity, "FATAL") {
+ return false
+ }
+ return true
+ },
+ }
+
+ if connectTimeoutSetting, present := settings["connect_timeout"]; present {
+ connectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "invalid connect_timeout", err: err}
+ }
+ config.ConnectTimeout = connectTimeout
+ config.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)
+ } else {
+ defaultDialer := makeDefaultDialer()
+ config.DialFunc = defaultDialer.DialContext
+ }
+
+ config.LookupFunc = makeDefaultResolver().LookupHost
+
+ notRuntimeParams := map[string]struct{}{
+ "host": {},
+ "port": {},
+ "database": {},
+ "user": {},
+ "password": {},
+ "passfile": {},
+ "connect_timeout": {},
+ "sslmode": {},
+ "sslkey": {},
+ "sslcert": {},
+ "sslrootcert": {},
+ "sslnegotiation": {},
+ "sslpassword": {},
+ "sslsni": {},
+ "krbspn": {},
+ "krbsrvname": {},
+ "target_session_attrs": {},
+ "service": {},
+ "servicefile": {},
+ }
+
+ // Adding kerberos configuration
+ if _, present := settings["krbsrvname"]; present {
+ config.KerberosSrvName = settings["krbsrvname"]
+ }
+ if _, present := settings["krbspn"]; present {
+ config.KerberosSpn = settings["krbspn"]
+ }
+
+ for k, v := range settings {
+ if _, present := notRuntimeParams[k]; present {
+ continue
+ }
+ config.RuntimeParams[k] = v
+ }
+
+ fallbacks := []*FallbackConfig{}
+
+ hosts := strings.Split(settings["host"], ",")
+ ports := strings.Split(settings["port"], ",")
+
+ for i, host := range hosts {
+ var portStr string
+ if i < len(ports) {
+ portStr = ports[i]
+ } else {
+ portStr = ports[0]
+ }
+
+ port, err := parsePort(portStr)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "invalid port", err: err}
+ }
+
+ var tlsConfigs []*tls.Config
+
+ // Ignore TLS settings if Unix domain socket like libpq
+ if network, _ := NetworkAddress(host, port); network == "unix" {
+ tlsConfigs = append(tlsConfigs, nil)
+ } else {
+ var err error
+ tlsConfigs, err = configTLS(settings, host, options)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to configure TLS", err: err}
+ }
+ }
+
+ for _, tlsConfig := range tlsConfigs {
+ fallbacks = append(fallbacks, &FallbackConfig{
+ Host: host,
+ Port: port,
+ TLSConfig: tlsConfig,
+ })
+ }
+ }
+
+ config.Host = fallbacks[0].Host
+ config.Port = fallbacks[0].Port
+ config.TLSConfig = fallbacks[0].TLSConfig
+ config.Fallbacks = fallbacks[1:]
+ config.SSLNegotiation = settings["sslnegotiation"]
+
+ passfile, err := pgpassfile.ReadPassfile(settings["passfile"])
+ if err == nil {
+ if config.Password == "" {
+ host := config.Host
+ if network, _ := NetworkAddress(config.Host, config.Port); network == "unix" {
+ host = "localhost"
+ }
+
+ config.Password = passfile.FindPassword(host, strconv.Itoa(int(config.Port)), config.Database, config.User)
+ }
+ }
+
+ switch tsa := settings["target_session_attrs"]; tsa {
+ case "read-write":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
+ case "read-only":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsReadOnly
+ case "primary":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsPrimary
+ case "standby":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsStandby
+ case "prefer-standby":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsPreferStandby
+ case "any":
+ // do nothing
+ default:
+ return nil, &ParseConfigError{ConnString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", tsa)}
+ }
+
+ return config, nil
+}
+
+func mergeSettings(settingSets ...map[string]string) map[string]string {
+ settings := make(map[string]string)
+
+ for _, s2 := range settingSets {
+ for k, v := range s2 {
+ settings[k] = v
+ }
+ }
+
+ return settings
+}
+
+func parseEnvSettings() map[string]string {
+ settings := make(map[string]string)
+
+ nameMap := map[string]string{
+ "PGHOST": "host",
+ "PGPORT": "port",
+ "PGDATABASE": "database",
+ "PGUSER": "user",
+ "PGPASSWORD": "password",
+ "PGPASSFILE": "passfile",
+ "PGAPPNAME": "application_name",
+ "PGCONNECT_TIMEOUT": "connect_timeout",
+ "PGSSLMODE": "sslmode",
+ "PGSSLKEY": "sslkey",
+ "PGSSLCERT": "sslcert",
+ "PGSSLSNI": "sslsni",
+ "PGSSLROOTCERT": "sslrootcert",
+ "PGSSLPASSWORD": "sslpassword",
+ "PGSSLNEGOTIATION": "sslnegotiation",
+ "PGTARGETSESSIONATTRS": "target_session_attrs",
+ "PGSERVICE": "service",
+ "PGSERVICEFILE": "servicefile",
+ "PGTZ": "timezone",
+ "PGOPTIONS": "options",
+ }
+
+ for envname, realname := range nameMap {
+ value := os.Getenv(envname)
+ if value != "" {
+ settings[realname] = value
+ }
+ }
+
+ return settings
+}
+
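+// parseURLSettings parses a URL-style connection string into a settings map, e.g.
+// "postgres://user:secret@host1:5432,host2:5433/mydb?sslmode=prefer" (values shown are illustrative).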
+func parseURLSettings(connString string) (map[string]string, error) {
+ settings := make(map[string]string)
+
+ parsedURL, err := url.Parse(connString)
+ if err != nil {
+ if urlErr := new(url.Error); errors.As(err, &urlErr) {
+ return nil, urlErr.Err
+ }
+ return nil, err
+ }
+
+ if parsedURL.User != nil {
+ settings["user"] = parsedURL.User.Username()
+ if password, present := parsedURL.User.Password(); present {
+ settings["password"] = password
+ }
+ }
+
+ // Handle multiple host:port pairs in url.Host by splitting them into comma-separated host and port lists.
+ var hosts []string
+ var ports []string
+ for _, host := range strings.Split(parsedURL.Host, ",") {
+ if host == "" {
+ continue
+ }
+ if isIPOnly(host) {
+ hosts = append(hosts, strings.Trim(host, "[]"))
+ continue
+ }
+ h, p, err := net.SplitHostPort(host)
+ if err != nil {
+ return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err)
+ }
+ if h != "" {
+ hosts = append(hosts, h)
+ }
+ if p != "" {
+ ports = append(ports, p)
+ }
+ }
+ if len(hosts) > 0 {
+ settings["host"] = strings.Join(hosts, ",")
+ }
+ if len(ports) > 0 {
+ settings["port"] = strings.Join(ports, ",")
+ }
+
+ database := strings.TrimLeft(parsedURL.Path, "/")
+ if database != "" {
+ settings["database"] = database
+ }
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ for k, v := range parsedURL.Query() {
+ if k2, present := nameMap[k]; present {
+ k = k2
+ }
+
+ settings[k] = v[0]
+ }
+
+ return settings, nil
+}
+
+func isIPOnly(host string) bool {
+ return net.ParseIP(strings.Trim(host, "[]")) != nil || !strings.Contains(host, ":")
+}
+
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
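+// parseKeywordValueSettings parses a libpq keyword/value connection string into a settings map,
+// e.g. "host=localhost port=5432 dbname=mydb connect_timeout=10" (values shown are illustrative).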
+func parseKeywordValueSettings(s string) (map[string]string, error) {
+ settings := make(map[string]string)
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ for len(s) > 0 {
+ var key, val string
+ eqIdx := strings.IndexRune(s, '=')
+ if eqIdx < 0 {
+ return nil, errors.New("invalid keyword/value")
+ }
+
+ key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
+ s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
+ if len(s) == 0 {
+ } else if s[0] != '\'' {
+ end := 0
+ for ; end < len(s); end++ {
+ if asciiSpace[s[end]] == 1 {
+ break
+ }
+ if s[end] == '\\' {
+ end++
+ if end == len(s) {
+ return nil, errors.New("invalid backslash")
+ }
+ }
+ }
+ val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ if end == len(s) {
+ s = ""
+ } else {
+ s = s[end+1:]
+ }
+ } else { // quoted string
+ s = s[1:]
+ end := 0
+ for ; end < len(s); end++ {
+ if s[end] == '\'' {
+ break
+ }
+ if s[end] == '\\' {
+ end++
+ }
+ }
+ if end == len(s) {
+ return nil, errors.New("unterminated quoted string in connection info string")
+ }
+ val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ if end == len(s) {
+ s = ""
+ } else {
+ s = s[end+1:]
+ }
+ }
+
+ if k, ok := nameMap[key]; ok {
+ key = k
+ }
+
+ if key == "" {
+ return nil, errors.New("invalid keyword/value")
+ }
+
+ settings[key] = val
+ }
+
+ return settings, nil
+}
+
+func parseServiceSettings(servicefilePath, serviceName string) (map[string]string, error) {
+ servicefile, err := pgservicefile.ReadServicefile(servicefilePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read service file: %v", servicefilePath)
+ }
+
+ service, err := servicefile.GetService(serviceName)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find service: %v", serviceName)
+ }
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ settings := make(map[string]string, len(service.Settings))
+ for k, v := range service.Settings {
+ if k2, present := nameMap[k]; present {
+ k = k2
+ }
+ settings[k] = v
+ }
+
+ return settings, nil
+}
+
+// configTLS uses libpq's TLS parameters to construct []*tls.Config. It is
+// necessary to allow returning multiple TLS configs as sslmode "allow" and
+// "prefer" allow fallback.
+func configTLS(settings map[string]string, thisHost string, parseConfigOptions ParseConfigOptions) ([]*tls.Config, error) {
+ host := thisHost
+ sslmode := settings["sslmode"]
+ sslrootcert := settings["sslrootcert"]
+ sslcert := settings["sslcert"]
+ sslkey := settings["sslkey"]
+ sslpassword := settings["sslpassword"]
+ sslsni := settings["sslsni"]
+ sslnegotiation := settings["sslnegotiation"]
+
+ // Match libpq default behavior
+ if sslmode == "" {
+ sslmode = "prefer"
+ }
+ if sslsni == "" {
+ sslsni = "1"
+ }
+
+ tlsConfig := &tls.Config{}
+
+ if sslnegotiation == "direct" {
+ tlsConfig.NextProtos = []string{"postgresql"}
+ if sslmode == "prefer" {
+ sslmode = "require"
+ }
+ }
+
+ if sslrootcert != "" {
+ var caCertPool *x509.CertPool
+
+ if sslrootcert == "system" {
+ var err error
+
+ caCertPool, err = x509.SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
+ }
+
+ sslmode = "verify-full"
+ } else {
+ caCertPool = x509.NewCertPool()
+
+ caPath := sslrootcert
+ caCert, err := os.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CA file: %w", err)
+ }
+
+ if !caCertPool.AppendCertsFromPEM(caCert) {
+ return nil, errors.New("unable to add CA to cert pool")
+ }
+ }
+
+ tlsConfig.RootCAs = caCertPool
+ tlsConfig.ClientCAs = caCertPool
+ }
+
+ switch sslmode {
+ case "disable":
+ return []*tls.Config{nil}, nil
+ case "allow", "prefer":
+ tlsConfig.InsecureSkipVerify = true
+ case "require":
+ // According to PostgreSQL documentation, if a root CA file exists,
+ // the behavior of sslmode=require should be the same as that of verify-ca
+ //
+ // See https://www.postgresql.org/docs/12/libpq-ssl.html
+ if sslrootcert != "" {
+ goto nextCase
+ }
+ tlsConfig.InsecureSkipVerify = true
+ break
+ nextCase:
+ fallthrough
+ case "verify-ca":
+ // Don't perform the default certificate verification because it
+ // will verify the hostname. Instead, verify the server's
+ // certificate chain ourselves in VerifyPeerCertificate and
+ // ignore the server name. This emulates libpq's verify-ca
+ // behavior.
+ //
+ // See https://github.com/golang/go/issues/21971#issuecomment-332693931
+ // and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
+ // for more info.
+ tlsConfig.InsecureSkipVerify = true
+ tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
+ certs := make([]*x509.Certificate, len(certificates))
+ for i, asn1Data := range certificates {
+ cert, err := x509.ParseCertificate(asn1Data)
+ if err != nil {
+ return errors.New("failed to parse certificate from server: " + err.Error())
+ }
+ certs[i] = cert
+ }
+
+ // Leave DNSName empty to skip hostname verification.
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ Intermediates: x509.NewCertPool(),
+ }
+ // Skip the first cert because it's the leaf. All others
+ // are intermediates.
+ for _, cert := range certs[1:] {
+ opts.Intermediates.AddCert(cert)
+ }
+ _, err := certs[0].Verify(opts)
+ return err
+ }
+ case "verify-full":
+ tlsConfig.ServerName = host
+ default:
+ return nil, errors.New("sslmode is invalid")
+ }
+
+ if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
+ return nil, errors.New(`both "sslcert" and "sslkey" are required`)
+ }
+
+ if sslcert != "" && sslkey != "" {
+ buf, err := os.ReadFile(sslkey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read sslkey: %w", err)
+ }
+ block, _ := pem.Decode(buf)
+ if block == nil {
+ return nil, errors.New("failed to decode sslkey")
+ }
+ var pemKey []byte
+ var decryptedKey []byte
+ var decryptedError error
+ // If PEM is encrypted, attempt to decrypt using pass phrase
+ if x509.IsEncryptedPEMBlock(block) {
+ // Attempt decryption with pass phrase
+ // NOTE: only supports RSA (PKCS#1)
+ if sslpassword != "" {
+ decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
+ }
+ // If sslpassword was not provided, or decrypting with it failed,
+ // try to obtain the password from the GetSSLPassword callback.
+ if sslpassword == "" || decryptedError != nil {
+ if parseConfigOptions.GetSSLPassword != nil {
+ sslpassword = parseConfigOptions.GetSSLPassword(context.Background())
+ }
+ if sslpassword == "" {
+ return nil, fmt.Errorf("unable to find sslpassword")
+ }
+ }
+ decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
+ // Should we also provide warning for PKCS#1 needed?
+ if decryptedError != nil {
+ return nil, fmt.Errorf("unable to decrypt key: %w", err)
+ }
+
+ pemBytes := pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: decryptedKey,
+ }
+ pemKey = pem.EncodeToMemory(&pemBytes)
+ } else {
+ pemKey = pem.EncodeToMemory(block)
+ }
+ certfile, err := os.ReadFile(sslcert)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read cert: %w", err)
+ }
+ cert, err := tls.X509KeyPair(certfile, pemKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load cert: %w", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ // Set Server Name Indication (SNI), if enabled by connection parameters.
+ // Per RFC 6066, do not set it if the host is a literal IP address (IPv4
+ // or IPv6).
+ if sslsni == "1" && net.ParseIP(host) == nil {
+ tlsConfig.ServerName = host
+ }
+
+ switch sslmode {
+ case "allow":
+ return []*tls.Config{nil, tlsConfig}, nil
+ case "prefer":
+ return []*tls.Config{tlsConfig, nil}, nil
+ case "require", "verify-ca", "verify-full":
+ return []*tls.Config{tlsConfig}, nil
+ default:
+ panic("BUG: bad sslmode should already have been caught")
+ }
+}
+
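+// parsePort parses a decimal port string (e.g. "5432") and validates that it is within 1-65535.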
+func parsePort(s string) (uint16, error) {
+ port, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ if port < 1 || port > math.MaxUint16 {
+ return 0, errors.New("outside range")
+ }
+ return uint16(port), nil
+}
+
+func makeDefaultDialer() *net.Dialer {
+ // rely on GOLANG KeepAlive settings
+ return &net.Dialer{}
+}
+
+func makeDefaultResolver() *net.Resolver {
+ return net.DefaultResolver
+}
+
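+// parseConnectTimeoutSetting converts a libpq-style connect_timeout value given in whole seconds
+// (e.g. "10") into a time.Duration.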
+func parseConnectTimeoutSetting(s string) (time.Duration, error) {
+ timeout, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ if timeout < 0 {
+ return 0, errors.New("negative timeout")
+ }
+ return time.Duration(timeout) * time.Second, nil
+}
+
+func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
+ d := makeDefaultDialer()
+ d.Timeout = timeout
+ return d.DialContext
+}
+
+// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=read-write.
+func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
+ result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
+ if err != nil {
+ return err
+ }
+
+ if string(result[0].Rows[0][0]) == "on" {
+ return errors.New("read only connection")
+ }
+
+ return nil
+}
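+
+// A minimal sketch of installing this or one of the following validators directly (the connection string is
+// illustrative); ParseConfig installs them automatically when target_session_attrs is set:
+//
+//	config, _ := pgconn.ParseConfig("postgres://localhost:5432/mydb")
+//	config.ValidateConnect = pgconn.ValidateConnectTargetSessionAttrsReadWrite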
+
+// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=read-only.
+func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
+ result, err := pgConn.Exec(ctx, "show transaction_read_only").ReadAll()
+ if err != nil {
+ return err
+ }
+
+ if string(result[0].Rows[0][0]) != "on" {
+ return errors.New("connection is not read only")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=standby.
+func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
+ result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
+ if err != nil {
+ return err
+ }
+
+ if string(result[0].Rows[0][0]) != "t" {
+ return errors.New("server is not in hot standby mode")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=primary.
+func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
+ result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
+ if err != nil {
+ return err
+ }
+
+ if string(result[0].Rows[0][0]) == "t" {
+ return errors.New("server is in standby mode")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=prefer-standby.
+func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
+ result, err := pgConn.Exec(ctx, "select pg_is_in_recovery()").ReadAll()
+ if err != nil {
+ return err
+ }
+
+ if string(result[0].Rows[0][0]) != "t" {
+ return &NotPreferredError{err: errors.New("server is not in hot standby mode")}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go
new file mode 100644
index 0000000..db8884e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go
@@ -0,0 +1,80 @@
+package ctxwatch
+
+import (
+ "context"
+ "sync"
+)
+
+// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
+// time.
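+//
+// A minimal usage sketch (handler is an illustrative ctxwatch.Handler implementation):
+//
+//	cw := ctxwatch.NewContextWatcher(handler)
+//	cw.Watch(ctx)
+//	// ... perform work that should be interrupted if ctx is canceled ...
+//	cw.Unwatch()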
+type ContextWatcher struct {
+ handler Handler
+ unwatchChan chan struct{}
+
+ lock sync.Mutex
+ watchInProgress bool
+ onCancelWasCalled bool
+}
+
+// NewContextWatcher returns a ContextWatcher. handler.HandleCancel will be called when a watched context is
+// canceled. handler.HandleUnwatchAfterCancel will be called when Unwatch is called after the watched context
+// had already been canceled and HandleCancel had been called.
+func NewContextWatcher(handler Handler) *ContextWatcher {
+ cw := &ContextWatcher{
+ handler: handler,
+ unwatchChan: make(chan struct{}),
+ }
+
+ return cw
+}
+
+// Watch starts watching ctx. If ctx is canceled then the handler's HandleCancel method will be called.
+func (cw *ContextWatcher) Watch(ctx context.Context) {
+ cw.lock.Lock()
+ defer cw.lock.Unlock()
+
+ if cw.watchInProgress {
+ panic("Watch already in progress")
+ }
+
+ cw.onCancelWasCalled = false
+
+ if ctx.Done() != nil {
+ cw.watchInProgress = true
+ go func() {
+ select {
+ case <-ctx.Done():
+ cw.handler.HandleCancel(ctx)
+ cw.onCancelWasCalled = true
+ <-cw.unwatchChan
+ case <-cw.unwatchChan:
+ }
+ }()
+ } else {
+ cw.watchInProgress = false
+ }
+}
+
+// Unwatch stops watching the previously watched context. If the handler's HandleCancel method was called then
+// HandleUnwatchAfterCancel will also be called.
+func (cw *ContextWatcher) Unwatch() {
+ cw.lock.Lock()
+ defer cw.lock.Unlock()
+
+ if cw.watchInProgress {
+ cw.unwatchChan <- struct{}{}
+ if cw.onCancelWasCalled {
+ cw.handler.HandleUnwatchAfterCancel()
+ }
+ cw.watchInProgress = false
+ }
+}
+
+type Handler interface {
+ // HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the
+ // context that was canceled.
+ HandleCancel(canceledCtx context.Context)
+
+ // HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched.
+ HandleUnwatchAfterCancel()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go b/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go
new file mode 100644
index 0000000..1dd514f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go
@@ -0,0 +1,63 @@
+//go:build !windows
+// +build !windows
+
+package pgconn
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+)
+
+func defaultSettings() map[string]string {
+ settings := make(map[string]string)
+
+ settings["host"] = defaultHost()
+ settings["port"] = "5432"
+
+ // Default to the OS user name. Purposely ignoring err getting user name from
+ // OS. The client application will simply have to specify the user in that
+ // case (which they typically will be doing anyway).
+ user, err := user.Current()
+ if err == nil {
+ settings["user"] = user.Username
+ settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass")
+ settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
+ sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
+ sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+ if _, err := os.Stat(sslcert); err == nil {
+ if _, err := os.Stat(sslkey); err == nil {
+ // Both the cert and key must be present to use them, or do not use either
+ settings["sslcert"] = sslcert
+ settings["sslkey"] = sslkey
+ }
+ }
+ sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt")
+ if _, err := os.Stat(sslrootcert); err == nil {
+ settings["sslrootcert"] = sslrootcert
+ }
+ }
+
+ settings["target_session_attrs"] = "any"
+
+ return settings
+}
+
+// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
+// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
+// checks the existence of common locations.
+func defaultHost() string {
+ candidatePaths := []string{
+ "/var/run/postgresql", // Debian
+ "/private/tmp", // OSX - homebrew
+ "/tmp", // standard PostgreSQL
+ }
+
+ for _, path := range candidatePaths {
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ }
+
+ return "localhost"
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go b/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go
new file mode 100644
index 0000000..33b4a1f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go
@@ -0,0 +1,57 @@
+package pgconn
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+)
+
+func defaultSettings() map[string]string {
+ settings := make(map[string]string)
+
+ settings["host"] = defaultHost()
+ settings["port"] = "5432"
+
+ // Default to the OS user name. Purposely ignoring err getting user name from
+ // OS. The client application will simply have to specify the user in that
+ // case (which they typically will be doing anyway).
+ user, err := user.Current()
+ appData := os.Getenv("APPDATA")
+ if err == nil {
+ // Windows gives us the username here as `DOMAIN\user` or `LOCALPCNAME\user`,
+ // but the libpq default is just the `user` portion, so we strip off the first part.
+ username := user.Username
+ if strings.Contains(username, "\\") {
+ username = username[strings.LastIndex(username, "\\")+1:]
+ }
+
+ settings["user"] = username
+ settings["passfile"] = filepath.Join(appData, "postgresql", "pgpass.conf")
+ settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
+ sslcert := filepath.Join(appData, "postgresql", "postgresql.crt")
+ sslkey := filepath.Join(appData, "postgresql", "postgresql.key")
+ if _, err := os.Stat(sslcert); err == nil {
+ if _, err := os.Stat(sslkey); err == nil {
+ // Both the cert and key must be present to use them, or do not use either
+ settings["sslcert"] = sslcert
+ settings["sslkey"] = sslkey
+ }
+ }
+ sslrootcert := filepath.Join(appData, "postgresql", "root.crt")
+ if _, err := os.Stat(sslrootcert); err == nil {
+ settings["sslrootcert"] = sslrootcert
+ }
+ }
+
+ settings["target_session_attrs"] = "any"
+
+ return settings
+}
+
+// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
+// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
+// checks the existence of common locations.
+func defaultHost() string {
+ return "localhost"
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/doc.go b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go
new file mode 100644
index 0000000..7013750
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go
@@ -0,0 +1,38 @@
+// Package pgconn is a low-level PostgreSQL database driver.
+/*
+pgconn provides lower level access to a PostgreSQL connection than a database/sql or pgx connection. It operates at
+nearly the same level as the C library libpq.
+
+Establishing a Connection
+
+Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the
+environment for libpq style environment variables.
+
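+For example, a minimal connection sketch (the connection string is illustrative):
+
+	pgConn, err := pgconn.Connect(context.Background(), "postgres://user:secret@localhost:5432/mydb")
+	if err != nil {
+		log.Fatalln("pgconn failed to connect:", err)
+	}
+	defer pgConn.Close(context.Background())
+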
+Executing a Query
+
+ExecParams and ExecPrepared execute a single query. They return readers that iterate over each row. The Read method
+reads all rows into memory.
+
+Executing Multiple Queries in a Single Round Trip
+
+Exec and ExecBatch can execute multiple queries in a single round trip. They return readers that iterate over each query
+result. The ReadAll method reads all query results into memory.
+
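+For example (the queries are illustrative):
+
+	// results contains one result per query sent in the batch.
+	results, err := pgConn.Exec(context.Background(), "select 1; select 2").ReadAll()
+	if err != nil {
+		log.Fatalln("failed to execute batch:", err)
+	}
+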
+Pipeline Mode
+
+Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of
+exactly how many and when network round trips occur.
+
+Context Support
+
+All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the
+method to immediately return. In most circumstances, this will also close the underlying connection. This behavior can
+be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior.
+This can be especially useful when queries are frequently canceled and the overhead of creating new connections is
+a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before
+interrupting the query in such a way as to close the connection.
+
+The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the
+client to abort.
+*/
+package pgconn
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/errors.go b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go
new file mode 100644
index 0000000..ec4a6d4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go
@@ -0,0 +1,248 @@
+package pgconn
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// SafeToRetry checks if the err is guaranteed to have occurred before sending any data to the server.
+func SafeToRetry(err error) bool {
+ var retryableErr interface{ SafeToRetry() bool }
+ if errors.As(err, &retryableErr) {
+ return retryableErr.SafeToRetry()
+ }
+ return false
+}
+
+// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
+// context.DeadlineExceeded or an implementer of net.Error where Timeout() is true.
+func Timeout(err error) bool {
+ var timeoutErr *errTimeout
+ return errors.As(err, &timeoutErr)
+}
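+
+// A minimal usage sketch of these helpers (connString is illustrative):
+//
+//	_, err := pgconn.Connect(ctx, connString)
+//	if pgconn.Timeout(err) {
+//		// the attempt timed out; SafeToRetry(err) reports whether no data was sent, so a retry is safe
+//	}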
+
+// PgError represents an error reported by the PostgreSQL server. See
+// http://www.postgresql.org/docs/11/static/protocol-error-fields.html for
+// detailed field description.
+type PgError struct {
+ Severity string
+ SeverityUnlocalized string
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+}
+
+func (pe *PgError) Error() string {
+ return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
+}
+
+// SQLState returns the SQLState of the error.
+func (pe *PgError) SQLState() string {
+ return pe.Code
+}
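+
+// A minimal sketch of inspecting a server-reported error (the SQLSTATE checked is illustrative):
+//
+//	var pgErr *pgconn.PgError
+//	if errors.As(err, &pgErr) && pgErr.Code == "23505" { // unique_violation
+//		// handle the duplicate key case
+//	}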
+
+// ConnectError is the error returned when a connection attempt fails.
+type ConnectError struct {
+ Config *Config // The configuration that was used in the connection attempt.
+ err error
+}
+
+func (e *ConnectError) Error() string {
+ prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database)
+ details := e.err.Error()
+ if strings.Contains(details, "\n") {
+ return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t")
+ } else {
+ return prefix + " " + details
+ }
+}
+
+func (e *ConnectError) Unwrap() error {
+ return e.err
+}
+
+type perDialConnectError struct {
+ address string
+ originalHostname string
+ err error
+}
+
+func (e *perDialConnectError) Error() string {
+ return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error())
+}
+
+func (e *perDialConnectError) Unwrap() error {
+ return e.err
+}
+
+type connLockError struct {
+ status string
+}
+
+func (e *connLockError) SafeToRetry() bool {
+ return true // a lock failure by definition happens before the connection is used.
+}
+
+func (e *connLockError) Error() string {
+ return e.status
+}
+
+// ParseConfigError is the error returned when a connection string cannot be parsed.
+type ParseConfigError struct {
+ ConnString string // The connection string that could not be parsed.
+ msg string
+ err error
+}
+
+func (e *ParseConfigError) Error() string {
+ // Now that ParseConfigError is public and ConnString is available to the developer, perhaps it would be better to only
+ // return a static string. That would ensure that the error message cannot leak a password. The ConnString field would
+ // allow access to the original string if desired and Unwrap would allow access to the underlying error.
+ connString := redactPW(e.ConnString)
+ if e.err == nil {
+ return fmt.Sprintf("cannot parse `%s`: %s", connString, e.msg)
+ }
+ return fmt.Sprintf("cannot parse `%s`: %s (%s)", connString, e.msg, e.err.Error())
+}
+
+func (e *ParseConfigError) Unwrap() error {
+ return e.err
+}
+
+func normalizeTimeoutError(ctx context.Context, err error) error {
+ var netErr net.Error
+ if errors.As(err, &netErr) && netErr.Timeout() {
+ if ctx.Err() == context.Canceled {
+ // Since the timeout was caused by a context cancellation, the actual error is context.Canceled not the timeout error.
+ return context.Canceled
+ } else if ctx.Err() == context.DeadlineExceeded {
+ return &errTimeout{err: ctx.Err()}
+ } else {
+ return &errTimeout{err: netErr}
+ }
+ }
+ return err
+}
+
+type pgconnError struct {
+ msg string
+ err error
+ safeToRetry bool
+}
+
+func (e *pgconnError) Error() string {
+ if e.msg == "" {
+ return e.err.Error()
+ }
+ if e.err == nil {
+ return e.msg
+ }
+ return fmt.Sprintf("%s: %s", e.msg, e.err.Error())
+}
+
+func (e *pgconnError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *pgconnError) Unwrap() error {
+ return e.err
+}
+
+// errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is
+// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true.
+type errTimeout struct {
+ err error
+}
+
+func (e *errTimeout) Error() string {
+ return fmt.Sprintf("timeout: %s", e.err.Error())
+}
+
+func (e *errTimeout) SafeToRetry() bool {
+ return SafeToRetry(e.err)
+}
+
+func (e *errTimeout) Unwrap() error {
+ return e.err
+}
+
+type contextAlreadyDoneError struct {
+ err error
+}
+
+func (e *contextAlreadyDoneError) Error() string {
+ return fmt.Sprintf("context already done: %s", e.err.Error())
+}
+
+func (e *contextAlreadyDoneError) SafeToRetry() bool {
+ return true
+}
+
+func (e *contextAlreadyDoneError) Unwrap() error {
+ return e.err
+}
+
+// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`.
+func newContextAlreadyDoneError(ctx context.Context) (err error) {
+ return &errTimeout{&contextAlreadyDoneError{err: ctx.Err()}}
+}
+
+func redactPW(connString string) string {
+ if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
+ if u, err := url.Parse(connString); err == nil {
+ return redactURL(u)
+ }
+ }
+ quotedKV := regexp.MustCompile(`password='[^']*'`)
+ connString = quotedKV.ReplaceAllLiteralString(connString, "password=xxxxx")
+ plainKV := regexp.MustCompile(`password=[^ ]*`)
+ connString = plainKV.ReplaceAllLiteralString(connString, "password=xxxxx")
+ brokenURL := regexp.MustCompile(`:[^:@]+?@`)
+ connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@")
+ return connString
+}
+
+func redactURL(u *url.URL) string {
+ if u == nil {
+ return ""
+ }
+ if _, pwSet := u.User.Password(); pwSet {
+ u.User = url.UserPassword(u.User.Username(), "xxxxx")
+ }
+ return u.String()
+}
+
+type NotPreferredError struct {
+ err error
+ safeToRetry bool
+}
+
+func (e *NotPreferredError) Error() string {
+ return fmt.Sprintf("standby server not found: %s", e.err.Error())
+}
+
+func (e *NotPreferredError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *NotPreferredError) Unwrap() error {
+ return e.err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go b/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go
new file mode 100644
index 0000000..e65c2c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go
@@ -0,0 +1,139 @@
+// Package bgreader provides an io.Reader that can optionally buffer reads in the background.
+package bgreader
+
+import (
+ "io"
+ "sync"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+)
+
+const (
+ StatusStopped = iota
+ StatusRunning
+ StatusStopping
+)
+
+// BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
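+//
+// A minimal usage sketch (conn is an illustrative io.Reader, such as a net.Conn, and buf a byte slice):
+//
+//	br := bgreader.New(conn)
+//	br.Start()             // begin buffering reads in a background goroutine
+//	// ... conn may be written to while reads are buffered ...
+//	br.Stop()              // stop after any in-progress background read returns
+//	n, err := br.Read(buf) // served from buffered results first, then directly from conn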
+type BGReader struct {
+ r io.Reader
+
+ cond *sync.Cond
+ status int32
+ readResults []readResult
+}
+
+type readResult struct {
+ buf *[]byte
+ err error
+}
+
+// Start starts the background reader. If the background reader is already running this is a no-op. The background
+// reader will stop automatically when the underlying reader returns an error.
+func (r *BGReader) Start() {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ switch r.status {
+ case StatusStopped:
+ r.status = StatusRunning
+ go r.bgRead()
+ case StatusRunning:
+ // no-op
+ case StatusStopping:
+ r.status = StatusRunning
+ }
+}
+
+// Stop tells the background reader to stop after the in progress Read returns. It is safe to call Stop when the
+// background reader is not running.
+func (r *BGReader) Stop() {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ switch r.status {
+ case StatusStopped:
+ // no-op
+ case StatusRunning:
+ r.status = StatusStopping
+ case StatusStopping:
+ // no-op
+ }
+}
+
+// Status returns the current status of the background reader.
+func (r *BGReader) Status() int32 {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+ return r.status
+}
+
+func (r *BGReader) bgRead() {
+ keepReading := true
+ for keepReading {
+ buf := iobufpool.Get(8192)
+ n, err := r.r.Read(*buf)
+ *buf = (*buf)[:n]
+
+ r.cond.L.Lock()
+ r.readResults = append(r.readResults, readResult{buf: buf, err: err})
+ if r.status == StatusStopping || err != nil {
+ r.status = StatusStopped
+ keepReading = false
+ }
+ r.cond.L.Unlock()
+ r.cond.Broadcast()
+ }
+}
+
+// Read implements the io.Reader interface.
+func (r *BGReader) Read(p []byte) (int, error) {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ if len(r.readResults) > 0 {
+ return r.readFromReadResults(p)
+ }
+
+ // There are no unread background read results and the background reader is stopped.
+ if r.status == StatusStopped {
+ return r.r.Read(p)
+ }
+
+ // Wait for results from the background reader
+ for len(r.readResults) == 0 {
+ r.cond.Wait()
+ }
+ return r.readFromReadResults(p)
+}
+
+// readFromReadResults returns data from a result previously read by the background reader. r.cond.L must be held.
+func (r *BGReader) readFromReadResults(p []byte) (int, error) {
+ buf := r.readResults[0].buf
+ var err error
+
+ n := copy(p, *buf)
+ if n == len(*buf) {
+ err = r.readResults[0].err
+ iobufpool.Put(buf)
+ if len(r.readResults) == 1 {
+ r.readResults = nil
+ } else {
+ r.readResults = r.readResults[1:]
+ }
+ } else {
+ *buf = (*buf)[n:]
+ r.readResults[0].buf = buf
+ }
+
+ return n, err
+}
+
+func New(r io.Reader) *BGReader {
+ return &BGReader{
+ r: r,
+ cond: &sync.Cond{
+ L: &sync.Mutex{},
+ },
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go b/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go
new file mode 100644
index 0000000..3c1af34
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go
@@ -0,0 +1,100 @@
+package pgconn
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+// NewGSSFunc creates a GSS authentication provider, for use with
+// RegisterGSSProvider.
+type NewGSSFunc func() (GSS, error)
+
+var newGSS NewGSSFunc
+
+// RegisterGSSProvider registers a GSS authentication provider. For example, if
+// you need to use Kerberos to authenticate with your server, add this to your
+// main package:
+//
+// import "github.com/otan/gopgkrb5"
+//
+// func init() {
+// pgconn.RegisterGSSProvider(func() (pgconn.GSS, error) { return gopgkrb5.NewGSS() })
+// }
+func RegisterGSSProvider(newGSSArg NewGSSFunc) {
+ newGSS = newGSSArg
+}
+
+// GSS provides GSSAPI authentication (e.g., Kerberos).
+type GSS interface {
+ GetInitToken(host string, service string) ([]byte, error)
+ GetInitTokenFromSPN(spn string) ([]byte, error)
+ Continue(inToken []byte) (done bool, outToken []byte, err error)
+}
+
+func (c *PgConn) gssAuth() error {
+ if newGSS == nil {
+ return errors.New("kerberos error: no GSSAPI provider registered, see https://github.com/otan/gopgkrb5")
+ }
+ cli, err := newGSS()
+ if err != nil {
+ return err
+ }
+
+ var nextData []byte
+ if c.config.KerberosSpn != "" {
+ // Use the supplied SPN if provided.
+ nextData, err = cli.GetInitTokenFromSPN(c.config.KerberosSpn)
+ } else {
+ // Allow the kerberos service name to be overridden
+ service := "postgres"
+ if c.config.KerberosSrvName != "" {
+ service = c.config.KerberosSrvName
+ }
+ nextData, err = cli.GetInitToken(c.config.Host, service)
+ }
+ if err != nil {
+ return err
+ }
+
+ for {
+ gssResponse := &pgproto3.GSSResponse{
+ Data: nextData,
+ }
+ c.frontend.Send(gssResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+ resp, err := c.rxGSSContinue()
+ if err != nil {
+ return err
+ }
+ var done bool
+ done, nextData, err = cli.Continue(resp.Data)
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ }
+ return nil
+}
+
+func (c *PgConn) rxGSSContinue() (*pgproto3.AuthenticationGSSContinue, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationGSSContinue:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationGSSContinue message but received unexpected message %T", msg)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go
new file mode 100644
index 0000000..bf3eaec
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go
@@ -0,0 +1,2496 @@
+package pgconn
+
+import (
+ "container/list"
+ "context"
+ "crypto/md5"
+ "crypto/tls"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgconn/ctxwatch"
+ "github.com/jackc/pgx/v5/pgconn/internal/bgreader"
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+const (
+ connStatusUninitialized = iota
+ connStatusConnecting
+ connStatusClosed
+ connStatusIdle
+ connStatusBusy
+)
+
+// Notice represents a notice response message reported by the PostgreSQL server. Be aware that this is distinct from
+// LISTEN/NOTIFY notification.
+type Notice PgError
+
+// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system
+type Notification struct {
+ PID uint32 // backend pid that sent the notification
+ Channel string // channel from which notification was received
+ Payload string
+}
+
+// DialFunc is a function that can be used to connect to a PostgreSQL server.
+type DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
+
+// LookupFunc is a function that can be used to look up IP addresses for a host. Optionally an ip:port combination can
+// be returned in order to override the connection string's port.
+type LookupFunc func(ctx context.Context, host string) (addrs []string, err error)
+
+// BuildFrontendFunc is a function that can be used to create Frontend implementation for connection.
+type BuildFrontendFunc func(r io.Reader, w io.Writer) *pgproto3.Frontend
+
+// PgErrorHandler is a function that handles errors returned from Postgres. This function must return true to keep
+// the connection open. Returning false will cause the connection to be closed immediately. You should return
+// false on any FATAL-severity errors. This will not receive network errors. The *PgConn is provided so the handler is
+// aware of the origin of the error, but it must not invoke any query method.
+type PgErrorHandler func(*PgConn, *PgError) bool
+
+// NoticeHandler is a function that can handle notices received from the PostgreSQL server. Notices can be received at
+// any time, usually during handling of a query response. The *PgConn is provided so the handler is aware of the origin
+// of the notice, but it must not invoke any query method. Be aware that this is distinct from LISTEN/NOTIFY
+// notification.
+type NoticeHandler func(*PgConn, *Notice)
+
+// NotificationHandler is a function that can handle notifications received from the PostgreSQL server. Notifications
+// can be received at any time, usually during handling of a query response. The *PgConn is provided so the handler is
+// aware of the origin of the notice, but it must not invoke any query method. Be aware that this is distinct from a
+// notice event.
+type NotificationHandler func(*PgConn, *Notification)
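+
+// A minimal sketch of installing a notification handler on a Config before connecting
+// (the log message format is illustrative):
+//
+//	config.OnNotification = func(c *pgconn.PgConn, n *pgconn.Notification) {
+//		log.Printf("notification on %q: %s", n.Channel, n.Payload)
+//	}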
+
+// PgConn is a low-level PostgreSQL connection handle. It is not safe for concurrent usage.
+type PgConn struct {
+ conn net.Conn
+ pid uint32 // backend pid
+ secretKey uint32 // key to use to send a cancel query message to the server
+ parameterStatuses map[string]string // parameters that have been reported by the server
+ txStatus byte
+ frontend *pgproto3.Frontend
+ bgReader *bgreader.BGReader
+ slowWriteTimer *time.Timer
+ bgReaderStarted chan struct{}
+
+ customData map[string]any
+
+ config *Config
+
+ status byte // One of connStatus* constants
+
+ bufferingReceive bool
+ bufferingReceiveMux sync.Mutex
+ bufferingReceiveMsg pgproto3.BackendMessage
+ bufferingReceiveErr error
+
+ peekedMsg pgproto3.BackendMessage
+
+ // Reusable / preallocated resources
+ resultReader ResultReader
+ multiResultReader MultiResultReader
+ pipeline Pipeline
+ contextWatcher *ctxwatch.ContextWatcher
+ fieldDescriptions [16]FieldDescription
+
+ cleanupDone chan struct{}
+}
+
+// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value
+// format) to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a
+// connect attempt.
+func Connect(ctx context.Context, connString string) (*PgConn, error) {
+ config, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConnectConfig(ctx, config)
+}
+
+// ConnectWithOptions establishes a connection to a PostgreSQL server using the environment and connString (in URL or
+// keyword/value format) and ParseConfigOptions to provide additional configuration. See documentation for
+// [ParseConfig] for details. ctx can be used to cancel a connect attempt.
+func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptions ParseConfigOptions) (*PgConn, error) {
+ config, err := ParseConfigWithOptions(connString, parseConfigOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConnectConfig(ctx, config)
+}
+
+// ConnectConfig establishes a connection to a PostgreSQL server using config. config must have been constructed with
+// [ParseConfig]. ctx can be used to cancel a connect attempt.
+//
+// If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An
+// authentication error will terminate the chain of attempts (like libpq:
+// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error.
+func ConnectConfig(ctx context.Context, config *Config) (*PgConn, error) {
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ var allErrors []error
+
+ connectConfigs, errs := buildConnectOneConfigs(ctx, config)
+ if len(errs) > 0 {
+ allErrors = append(allErrors, errs...)
+ }
+
+ if len(connectConfigs) == 0 {
+ return nil, &ConnectError{Config: config, err: fmt.Errorf("hostname resolving error: %w", errors.Join(allErrors...))}
+ }
+
+ pgConn, errs := connectPreferred(ctx, config, connectConfigs)
+ if len(errs) > 0 {
+ allErrors = append(allErrors, errs...)
+ return nil, &ConnectError{Config: config, err: errors.Join(allErrors...)}
+ }
+
+ if config.AfterConnect != nil {
+ err := config.AfterConnect(ctx, pgConn)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, &ConnectError{Config: config, err: fmt.Errorf("AfterConnect error: %w", err)}
+ }
+ }
+
+ return pgConn, nil
+}
+
+// buildConnectOneConfigs resolves hostnames and builds a list of connectOneConfigs to try connecting to. It returns a
+// slice of successfully resolved connectOneConfigs and a slice of errors. It is possible for both slices to contain
+// values if some hosts were successfully resolved and others were not.
+func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneConfig, []error) {
+ // Simplify usage by treating primary config and fallbacks the same.
+ fallbackConfigs := []*FallbackConfig{
+ {
+ Host: config.Host,
+ Port: config.Port,
+ TLSConfig: config.TLSConfig,
+ },
+ }
+ fallbackConfigs = append(fallbackConfigs, config.Fallbacks...)
+
+ var configs []*connectOneConfig
+
+ var allErrors []error
+
+ for _, fb := range fallbackConfigs {
+ // skip resolve for unix sockets
+ if isAbsolutePath(fb.Host) {
+ network, address := NetworkAddress(fb.Host, fb.Port)
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+
+ continue
+ }
+
+ ips, err := config.LookupFunc(ctx, fb.Host)
+ if err != nil {
+ allErrors = append(allErrors, err)
+ continue
+ }
+
+ for _, ip := range ips {
+ splitIP, splitPort, err := net.SplitHostPort(ip)
+ if err == nil {
+ port, err := strconv.ParseUint(splitPort, 10, 16)
+ if err != nil {
+ return nil, []error{fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)}
+ }
+ network, address := NetworkAddress(splitIP, uint16(port))
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+ } else {
+ network, address := NetworkAddress(ip, fb.Port)
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+ }
+ }
+ }
+
+ return configs, allErrors
+}
+
+// connectPreferred attempts to connect to the preferred host from connectOneConfigs. The connections are attempted in
+// order. If a connection is successful it is returned. If no connection is successful then all errors are returned. If
+// a connection attempt returns a [NotPreferredError], then that host will be used if no other hosts are successful.
+func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*connectOneConfig) (*PgConn, []error) {
+ octx := ctx
+ var allErrors []error
+
+ var fallbackConnectOneConfig *connectOneConfig
+ for i, c := range connectOneConfigs {
+ // ConnectTimeout restricts the whole connection process.
+ if config.ConnectTimeout != 0 {
+ // create new context first time or when previous host was different
+ if i == 0 || (connectOneConfigs[i].address != connectOneConfigs[i-1].address) {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout)
+ defer cancel()
+ }
+ } else {
+ ctx = octx
+ }
+
+ pgConn, err := connectOne(ctx, config, c, false)
+ if pgConn != nil {
+ return pgConn, nil
+ }
+
+ allErrors = append(allErrors, err)
+
+ var pgErr *PgError
+ if errors.As(err, &pgErr) {
+ // pgx will try next host even if libpq does not in certain cases (see #2246)
+ // consider change for the next major version
+
+ const ERRCODE_INVALID_PASSWORD = "28P01"
+ const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
+ const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
+
+ // auth failed due to invalid password, db does not exist or user has no permission
+ if pgErr.Code == ERRCODE_INVALID_PASSWORD ||
+ pgErr.Code == ERRCODE_INVALID_CATALOG_NAME ||
+ pgErr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE {
+ return nil, allErrors
+ }
+ }
+
+ var npErr *NotPreferredError
+ if errors.As(err, &npErr) {
+ fallbackConnectOneConfig = c
+ }
+ }
+
+ if fallbackConnectOneConfig != nil {
+ pgConn, err := connectOne(ctx, config, fallbackConnectOneConfig, true)
+ if err == nil {
+ return pgConn, nil
+ }
+ allErrors = append(allErrors, err)
+ }
+
+ return nil, allErrors
+}
+
+// connectOne makes one connection attempt to a single host.
+func connectOne(ctx context.Context, config *Config, connectConfig *connectOneConfig,
+ ignoreNotPreferredErr bool,
+) (*PgConn, error) {
+ pgConn := new(PgConn)
+ pgConn.config = config
+ pgConn.cleanupDone = make(chan struct{})
+ pgConn.customData = make(map[string]any)
+
+ var err error
+
+ newPerDialConnectError := func(msg string, err error) *perDialConnectError {
+ err = normalizeTimeoutError(ctx, err)
+ e := &perDialConnectError{address: connectConfig.address, originalHostname: connectConfig.originalHostname, err: fmt.Errorf("%s: %w", msg, err)}
+ return e
+ }
+
+ pgConn.conn, err = config.DialFunc(ctx, connectConfig.network, connectConfig.address)
+ if err != nil {
+ return nil, newPerDialConnectError("dial error", err)
+ }
+
+ if connectConfig.tlsConfig != nil {
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: pgConn.conn})
+ pgConn.contextWatcher.Watch(ctx)
+ var (
+ tlsConn net.Conn
+ err error
+ )
+ if config.SSLNegotiation == "direct" {
+ tlsConn = tls.Client(pgConn.conn, connectConfig.tlsConfig)
+ } else {
+ tlsConn, err = startTLS(pgConn.conn, connectConfig.tlsConfig)
+ }
+ pgConn.contextWatcher.Unwatch() // Always unwatch `netConn` after TLS.
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("tls error", err)
+ }
+
+ pgConn.conn = tlsConn
+ }
+
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(config.BuildContextWatcherHandler(pgConn))
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+
+ pgConn.parameterStatuses = make(map[string]string)
+ pgConn.status = connStatusConnecting
+ pgConn.bgReader = bgreader.New(pgConn.conn)
+ pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
+ func() {
+ pgConn.bgReader.Start()
+ pgConn.bgReaderStarted <- struct{}{}
+ },
+ )
+ pgConn.slowWriteTimer.Stop()
+ pgConn.bgReaderStarted = make(chan struct{})
+ pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
+
+ startupMsg := pgproto3.StartupMessage{
+ ProtocolVersion: pgproto3.ProtocolVersionNumber,
+ Parameters: make(map[string]string),
+ }
+
+ // Copy default run-time params
+ for k, v := range config.RuntimeParams {
+ startupMsg.Parameters[k] = v
+ }
+
+ startupMsg.Parameters["user"] = config.User
+ if config.Database != "" {
+ startupMsg.Parameters["database"] = config.Database
+ }
+
+ pgConn.frontend.Send(&startupMsg)
+ if err := pgConn.flushWithPotentialWriteReadDeadlock(); err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write startup message", err)
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.conn.Close()
+ if err, ok := err.(*PgError); ok {
+ return nil, newPerDialConnectError("server error", err)
+ }
+ return nil, newPerDialConnectError("failed to receive message", err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.BackendKeyData:
+ pgConn.pid = msg.ProcessID
+ pgConn.secretKey = msg.SecretKey
+
+ case *pgproto3.AuthenticationOk:
+ case *pgproto3.AuthenticationCleartextPassword:
+ err = pgConn.txPasswordMessage(pgConn.config.Password)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write password message", err)
+ }
+ case *pgproto3.AuthenticationMD5Password:
+ digestedPassword := "md5" + hexMD5(hexMD5(pgConn.config.Password+pgConn.config.User)+string(msg.Salt[:]))
+ err = pgConn.txPasswordMessage(digestedPassword)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write password message", err)
+ }
+ case *pgproto3.AuthenticationSASL:
+ err = pgConn.scramAuth(msg.AuthMechanisms)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed SASL auth", err)
+ }
+ case *pgproto3.AuthenticationGSS:
+ err = pgConn.gssAuth()
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed GSS auth", err)
+ }
+ case *pgproto3.ReadyForQuery:
+ pgConn.status = connStatusIdle
+ if config.ValidateConnect != nil {
+ // ValidateConnect may execute commands that cause the context to be watched again. Unwatch first to avoid
+ // the watch already in progress panic. This is the last thing done by this method so there is no need to
+ // restart the watch after ValidateConnect returns.
+ //
+ // See https://github.com/jackc/pgconn/issues/40.
+ pgConn.contextWatcher.Unwatch()
+
+ err := config.ValidateConnect(ctx, pgConn)
+ if err != nil {
+ if _, ok := err.(*NotPreferredError); ignoreNotPreferredErr && ok {
+ return pgConn, nil
+ }
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("ValidateConnect failed", err)
+ }
+ }
+ return pgConn, nil
+ case *pgproto3.ParameterStatus, *pgproto3.NoticeResponse:
+ // handled by ReceiveMessage
+ case *pgproto3.ErrorResponse:
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("server error", ErrorResponseToPgError(msg))
+ default:
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("received unexpected message", err)
+ }
+ }
+}
+
+func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
+ err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103})
+ if err != nil {
+ return nil, err
+ }
+
+ response := make([]byte, 1)
+ if _, err = io.ReadFull(conn, response); err != nil {
+ return nil, err
+ }
+
+ if response[0] != 'S' {
+ return nil, errors.New("server refused TLS connection")
+ }
+
+ return tls.Client(conn, tlsConfig), nil
+}
+
+func (pgConn *PgConn) txPasswordMessage(password string) (err error) {
+ pgConn.frontend.Send(&pgproto3.PasswordMessage{Password: password})
+ return pgConn.flushWithPotentialWriteReadDeadlock()
+}
+
+func hexMD5(s string) string {
+ hash := md5.New()
+ io.WriteString(hash, s)
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+func (pgConn *PgConn) signalMessage() chan struct{} {
+ if pgConn.bufferingReceive {
+ panic("BUG: signalMessage when already in progress")
+ }
+
+ pgConn.bufferingReceive = true
+ pgConn.bufferingReceiveMux.Lock()
+
+ ch := make(chan struct{})
+ go func() {
+ pgConn.bufferingReceiveMsg, pgConn.bufferingReceiveErr = pgConn.frontend.Receive()
+ pgConn.bufferingReceiveMux.Unlock()
+ close(ch)
+ }()
+
+ return ch
+}
+
+// ReceiveMessage receives one wire protocol message from the PostgreSQL server. It must only be used when the
+// connection is not busy. e.g. It is an error to call ReceiveMessage while reading the result of a query. The messages
+// are still handled by the core pgconn message handling system so receiving a NotificationResponse will still trigger
+// the OnNotification callback.
+//
+// This is a very low level method that requires deep understanding of the PostgreSQL wire protocol to use correctly.
+// See https://www.postgresql.org/docs/current/protocol.html.
+func (pgConn *PgConn) ReceiveMessage(ctx context.Context) (pgproto3.BackendMessage, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return nil, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ err = &pgconnError{
+ msg: "receive message failed",
+ err: normalizeTimeoutError(ctx, err),
+ safeToRetry: true,
+ }
+ }
+ return msg, err
+}
+
+// peekMessage peeks at the next message without setting up context cancellation.
+func (pgConn *PgConn) peekMessage() (pgproto3.BackendMessage, error) {
+ if pgConn.peekedMsg != nil {
+ return pgConn.peekedMsg, nil
+ }
+
+ var msg pgproto3.BackendMessage
+ var err error
+ if pgConn.bufferingReceive {
+ pgConn.bufferingReceiveMux.Lock()
+ msg = pgConn.bufferingReceiveMsg
+ err = pgConn.bufferingReceiveErr
+ pgConn.bufferingReceiveMux.Unlock()
+ pgConn.bufferingReceive = false
+
+ // If a timeout error happened in the background try the read again.
+ var netErr net.Error
+ if errors.As(err, &netErr) && netErr.Timeout() {
+ msg, err = pgConn.frontend.Receive()
+ }
+ } else {
+ msg, err = pgConn.frontend.Receive()
+ }
+
+ if err != nil {
+ // Close on anything other than timeout error - everything else is fatal
+ var netErr net.Error
+ isNetErr := errors.As(err, &netErr)
+ if !(isNetErr && netErr.Timeout()) {
+ pgConn.asyncClose()
+ }
+
+ return nil, err
+ }
+
+ pgConn.peekedMsg = msg
+ return msg, nil
+}
+
+// receiveMessage receives a message without setting up context cancellation
+func (pgConn *PgConn) receiveMessage() (pgproto3.BackendMessage, error) {
+ msg, err := pgConn.peekMessage()
+ if err != nil {
+ return nil, err
+ }
+ pgConn.peekedMsg = nil
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ pgConn.txStatus = msg.TxStatus
+ case *pgproto3.ParameterStatus:
+ pgConn.parameterStatuses[msg.Name] = msg.Value
+ case *pgproto3.ErrorResponse:
+ err := ErrorResponseToPgError(msg)
+ if pgConn.config.OnPgError != nil && !pgConn.config.OnPgError(pgConn, err) {
+ pgConn.status = connStatusClosed
+ pgConn.conn.Close() // Ignore error as the connection is already broken and there is already an error to return.
+ close(pgConn.cleanupDone)
+ return nil, err
+ }
+ case *pgproto3.NoticeResponse:
+ if pgConn.config.OnNotice != nil {
+ pgConn.config.OnNotice(pgConn, noticeResponseToNotice(msg))
+ }
+ case *pgproto3.NotificationResponse:
+ if pgConn.config.OnNotification != nil {
+ pgConn.config.OnNotification(pgConn, &Notification{PID: msg.PID, Channel: msg.Channel, Payload: msg.Payload})
+ }
+ }
+
+ return msg, nil
+}
+
+// Conn returns the underlying net.Conn. This is rarely necessary. If the connection will be directly used for reading or
+// writing then SyncConn should usually be called before Conn.
+func (pgConn *PgConn) Conn() net.Conn {
+ return pgConn.conn
+}
+
+// PID returns the backend PID.
+func (pgConn *PgConn) PID() uint32 {
+ return pgConn.pid
+}
+
+// TxStatus returns the current TxStatus as reported by the server in the ReadyForQuery message.
+//
+// Possible return values:
+//
+// 'I' - idle / not in transaction
+// 'T' - in a transaction
+// 'E' - in a failed transaction
+//
+// See https://www.postgresql.org/docs/current/protocol-message-formats.html.
+func (pgConn *PgConn) TxStatus() byte {
+ return pgConn.txStatus
+}
+
+// SecretKey returns the backend secret key used to send a cancel query message to the server.
+func (pgConn *PgConn) SecretKey() uint32 {
+ return pgConn.secretKey
+}
+
+// Frontend returns the underlying *pgproto3.Frontend. This is rarely necessary.
+func (pgConn *PgConn) Frontend() *pgproto3.Frontend {
+ return pgConn.frontend
+}
+
+// Close closes a connection. It is safe to call Close on an already closed connection. Close attempts a clean close by
+// sending the exit message to PostgreSQL. However, this could block so ctx is available to limit the time to wait. The
+// underlying net.Conn.Close() will always be called regardless of any other errors.
+func (pgConn *PgConn) Close(ctx context.Context) error {
+ if pgConn.status == connStatusClosed {
+ return nil
+ }
+ pgConn.status = connStatusClosed
+
+ defer close(pgConn.cleanupDone)
+ defer pgConn.conn.Close()
+
+ if ctx != context.Background() {
+ // Close may be called while a cancellable query is in progress. This will most often be triggered by panic when
+ // a defer closes the connection (possibly indirectly via a transaction or a connection pool). Unwatch to end any
+		// previous watch. It is safe to Unwatch regardless of whether a watch is already in progress.
+ //
+ // See https://github.com/jackc/pgconn/issues/29
+ pgConn.contextWatcher.Unwatch()
+
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Ignore any errors sending Terminate message and waiting for server to close connection.
+ // This mimics the behavior of libpq PQfinish. It calls closePGconn which calls sendTerminateConn which purposefully
+ // ignores errors.
+ //
+ // See https://github.com/jackc/pgx/issues/637
+ pgConn.frontend.Send(&pgproto3.Terminate{})
+ pgConn.flushWithPotentialWriteReadDeadlock()
+
+ return pgConn.conn.Close()
+}
+
+// asyncClose marks the connection as closed and asynchronously sends a cancel query message and closes the underlying
+// connection.
+func (pgConn *PgConn) asyncClose() {
+ if pgConn.status == connStatusClosed {
+ return
+ }
+ pgConn.status = connStatusClosed
+
+ go func() {
+ defer close(pgConn.cleanupDone)
+ defer pgConn.conn.Close()
+
+ deadline := time.Now().Add(time.Second * 15)
+
+ ctx, cancel := context.WithDeadline(context.Background(), deadline)
+ defer cancel()
+
+ pgConn.CancelRequest(ctx)
+
+ pgConn.conn.SetDeadline(deadline)
+
+ pgConn.frontend.Send(&pgproto3.Terminate{})
+ pgConn.flushWithPotentialWriteReadDeadlock()
+ }()
+}
+
+// CleanupDone returns a channel that will be closed after all underlying resources have been cleaned up. A closed
+// connection is no longer usable, but underlying resources, in particular the net.Conn, may not have finished closing
+// yet. This is because certain errors such as a context cancellation require that the interrupted function call return
+// immediately, but the error may also cause the connection to be closed. In these cases the underlying resources are
+// closed asynchronously.
+//
+// This is only likely to be useful to connection pools. It gives them a way to avoid establishing a new connection while
+// an old connection is still being cleaned up and thereby exceeding the maximum pool size.
+func (pgConn *PgConn) CleanupDone() chan (struct{}) {
+ return pgConn.cleanupDone
+}
+
+// IsClosed reports if the connection has been closed.
+//
+// CleanupDone() can be used to determine if all cleanup has been completed.
+func (pgConn *PgConn) IsClosed() bool {
+ return pgConn.status < connStatusIdle
+}
+
+// IsBusy reports if the connection is busy.
+func (pgConn *PgConn) IsBusy() bool {
+ return pgConn.status == connStatusBusy
+}
+
+// lock locks the connection.
+func (pgConn *PgConn) lock() error {
+ switch pgConn.status {
+ case connStatusBusy:
+ return &connLockError{status: "conn busy"} // This only should be possible in case of an application bug.
+ case connStatusClosed:
+ return &connLockError{status: "conn closed"}
+ case connStatusUninitialized:
+ return &connLockError{status: "conn uninitialized"}
+ }
+ pgConn.status = connStatusBusy
+ return nil
+}
+
+func (pgConn *PgConn) unlock() {
+ switch pgConn.status {
+ case connStatusBusy:
+ pgConn.status = connStatusIdle
+ case connStatusClosed:
+ default:
+ panic("BUG: cannot unlock unlocked connection") // This should only be possible if there is a bug in this package.
+ }
+}
+
+// ParameterStatus returns the value of a parameter reported by the server (e.g.
+// server_version). Returns an empty string for unknown parameters.
+func (pgConn *PgConn) ParameterStatus(key string) string {
+ return pgConn.parameterStatuses[key]
+}
+
+// CommandTag is the status text returned by PostgreSQL for a query.
+type CommandTag struct {
+ s string
+}
+
+// NewCommandTag makes a CommandTag from s.
+func NewCommandTag(s string) CommandTag {
+ return CommandTag{s: s}
+}
+
+// RowsAffected returns the number of rows affected. If the CommandTag was not
+// for a row affecting command (e.g. "CREATE TABLE") then it returns 0.
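+// For example, NewCommandTag("UPDATE 3").RowsAffected() returns 3 and NewCommandTag("CREATE TABLE").RowsAffected()
+// returns 0.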
+func (ct CommandTag) RowsAffected() int64 {
+	// Find the index where the trailing run of digits begins.
+ idx := -1
+ for i := len(ct.s) - 1; i >= 0; i-- {
+ if ct.s[i] >= '0' && ct.s[i] <= '9' {
+ idx = i
+ } else {
+ break
+ }
+ }
+
+ if idx == -1 {
+ return 0
+ }
+
+ var n int64
+ for _, b := range ct.s[idx:] {
+ n = n*10 + int64(b-'0')
+ }
+
+ return n
+}
+
+func (ct CommandTag) String() string {
+ return ct.s
+}
+
+// Insert is true if the command tag starts with "INSERT".
+func (ct CommandTag) Insert() bool {
+ return strings.HasPrefix(ct.s, "INSERT")
+}
+
+// Update is true if the command tag starts with "UPDATE".
+func (ct CommandTag) Update() bool {
+ return strings.HasPrefix(ct.s, "UPDATE")
+}
+
+// Delete is true if the command tag starts with "DELETE".
+func (ct CommandTag) Delete() bool {
+ return strings.HasPrefix(ct.s, "DELETE")
+}
+
+// Select is true if the command tag starts with "SELECT".
+func (ct CommandTag) Select() bool {
+ return strings.HasPrefix(ct.s, "SELECT")
+}
+
+type FieldDescription struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+}
+
+func (pgConn *PgConn) convertRowDescription(dst []FieldDescription, rd *pgproto3.RowDescription) []FieldDescription {
+ if cap(dst) >= len(rd.Fields) {
+ dst = dst[:len(rd.Fields):len(rd.Fields)]
+ } else {
+ dst = make([]FieldDescription, len(rd.Fields))
+ }
+
+ for i := range rd.Fields {
+ dst[i].Name = string(rd.Fields[i].Name)
+ dst[i].TableOID = rd.Fields[i].TableOID
+ dst[i].TableAttributeNumber = rd.Fields[i].TableAttributeNumber
+ dst[i].DataTypeOID = rd.Fields[i].DataTypeOID
+ dst[i].DataTypeSize = rd.Fields[i].DataTypeSize
+ dst[i].TypeModifier = rd.Fields[i].TypeModifier
+ dst[i].Format = rd.Fields[i].Format
+ }
+
+ return dst
+}
+
+type StatementDescription struct {
+ Name string
+ SQL string
+ ParamOIDs []uint32
+ Fields []FieldDescription
+}
+
+// Prepare creates a prepared statement. If the name is empty, the anonymous prepared statement will be used. This
+// allows Prepare to also describe statements without creating a server-side prepared statement.
+//
+// Prepare does not send a PREPARE statement to the server. It uses the PostgreSQL Parse and Describe protocol messages
+// directly.
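+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn and ctx is a context.Context; the statement name
+// and SQL are illustrative):
+//
+//	sd, err := pgConn.Prepare(ctx, "get_n", "select $1::int4", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = sd.ParamOIDs // OIDs of $1, etc.; sd.Fields describes the result columns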
+func (pgConn *PgConn) Prepare(ctx context.Context, name, sql string, paramOIDs []uint32) (*StatementDescription, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return nil, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ pgConn.frontend.SendParse(&pgproto3.Parse{Name: name, Query: sql, ParameterOIDs: paramOIDs})
+ pgConn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'S', Name: name})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return nil, err
+ }
+
+ psd := &StatementDescription{Name: name, SQL: sql}
+
+ var parseErr error
+
+readloop:
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return nil, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ psd.ParamOIDs = make([]uint32, len(msg.ParameterOIDs))
+ copy(psd.ParamOIDs, msg.ParameterOIDs)
+ case *pgproto3.RowDescription:
+ psd.Fields = pgConn.convertRowDescription(nil, msg)
+ case *pgproto3.ErrorResponse:
+ parseErr = ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ break readloop
+ }
+ }
+
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ return psd, nil
+}
+
+// Deallocate deallocates a prepared statement.
+//
+// Deallocate does not send a DEALLOCATE statement to the server. It uses the PostgreSQL Close protocol message
+// directly. This has slightly different behavior than executing a DEALLOCATE statement.
+// - Deallocate can succeed in an aborted transaction.
+// - Deallocating a non-existent prepared statement is not an error.
+func (pgConn *PgConn) Deallocate(ctx context.Context, name string) error {
+ if err := pgConn.lock(); err != nil {
+ return err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ pgConn.frontend.SendClose(&pgproto3.Close{ObjectType: 'S', Name: name})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return err
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ return ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ return nil
+ }
+ }
+}
+
+// ErrorResponseToPgError converts a wire protocol error message to a *PgError.
+func ErrorResponseToPgError(msg *pgproto3.ErrorResponse) *PgError {
+ return &PgError{
+ Severity: msg.Severity,
+ SeverityUnlocalized: msg.SeverityUnlocalized,
+ Code: string(msg.Code),
+ Message: string(msg.Message),
+ Detail: string(msg.Detail),
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: string(msg.InternalQuery),
+ Where: string(msg.Where),
+ SchemaName: string(msg.SchemaName),
+ TableName: string(msg.TableName),
+ ColumnName: string(msg.ColumnName),
+ DataTypeName: string(msg.DataTypeName),
+ ConstraintName: msg.ConstraintName,
+ File: string(msg.File),
+ Line: msg.Line,
+ Routine: string(msg.Routine),
+ }
+}
+
+func noticeResponseToNotice(msg *pgproto3.NoticeResponse) *Notice {
+ pgerr := ErrorResponseToPgError((*pgproto3.ErrorResponse)(msg))
+ return (*Notice)(pgerr)
+}
+
+// CancelRequest sends a cancel request to the PostgreSQL server. It returns an error if unable to deliver the cancel
+// request, but lack of an error does not ensure that the query was canceled. As specified in the documentation, there
+// is no way to be sure a query was canceled. See https://www.postgresql.org/docs/11/protocol-flow.html#id-1.10.5.7.9
+func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
+ // Open a cancellation request to the same server. The address is taken from the net.Conn directly instead of reusing
+ // the connection config. This is important in high availability configurations where fallback connections may be
+ // specified or DNS may be used to load balance.
+ serverAddr := pgConn.conn.RemoteAddr()
+ var serverNetwork string
+ var serverAddress string
+ if serverAddr.Network() == "unix" {
+ // for unix sockets, RemoteAddr() calls getpeername() which returns the name the
+ // server passed to bind(). For Postgres, this is always a relative path "./.s.PGSQL.5432"
+ // so connecting to it will fail. Fall back to the config's value
+ serverNetwork, serverAddress = NetworkAddress(pgConn.config.Host, pgConn.config.Port)
+ } else {
+ serverNetwork, serverAddress = serverAddr.Network(), serverAddr.String()
+ }
+ cancelConn, err := pgConn.config.DialFunc(ctx, serverNetwork, serverAddress)
+ if err != nil {
+ // In case of unix sockets, RemoteAddr() returns only the file part of the path. If the
+ // first connect failed, try the config.
+ if serverAddr.Network() != "unix" {
+ return err
+ }
+ serverNetwork, serverAddr := NetworkAddress(pgConn.config.Host, pgConn.config.Port)
+ cancelConn, err = pgConn.config.DialFunc(ctx, serverNetwork, serverAddr)
+ if err != nil {
+ return err
+ }
+ }
+ defer cancelConn.Close()
+
+ if ctx != context.Background() {
+ contextWatcher := ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: cancelConn})
+ contextWatcher.Watch(ctx)
+ defer contextWatcher.Unwatch()
+ }
+
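+	// Build the cancel request packet: a 16 byte message consisting of the length (16), the cancel request code
+	// 80877102 (1234 in the high 16 bits, 5678 in the low 16 bits), the backend PID, and the backend secret key.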
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint32(buf[0:4], 16)
+ binary.BigEndian.PutUint32(buf[4:8], 80877102)
+ binary.BigEndian.PutUint32(buf[8:12], pgConn.pid)
+ binary.BigEndian.PutUint32(buf[12:16], pgConn.secretKey)
+
+ if _, err := cancelConn.Write(buf); err != nil {
+ return fmt.Errorf("write to connection for cancellation: %w", err)
+ }
+
+ // Wait for the cancel request to be acknowledged by the server.
+ // It copies the behavior of the libpq: https://github.com/postgres/postgres/blob/REL_16_0/src/interfaces/libpq/fe-connect.c#L4946-L4960
+ _, _ = cancelConn.Read(buf)
+
+ return nil
+}
+
+// WaitForNotification waits for a LISTEN/NOTIFY message to be received. It returns an error if a notification was not
+// received.
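+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn; the channel name is illustrative):
+//
+//	if _, err := pgConn.Exec(ctx, "listen mychan").ReadAll(); err != nil {
+//		// handle error
+//	}
+//	if err := pgConn.WaitForNotification(ctx); err != nil {
+//		// handle error
+//	}
+//	// The notification itself is delivered via Config.OnNotification.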
+func (pgConn *PgConn) WaitForNotification(ctx context.Context) error {
+ if err := pgConn.lock(); err != nil {
+ return err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return newContextAlreadyDoneError(ctx)
+ default:
+ }
+
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ return normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg.(type) {
+ case *pgproto3.NotificationResponse:
+ return nil
+ }
+ }
+}
+
+// Exec executes SQL via the PostgreSQL simple query protocol. SQL may contain multiple queries. Execution is
+// implicitly wrapped in a transaction unless a transaction is already in progress or SQL contains transaction control
+// statements.
+//
+// Prefer ExecParams unless executing arbitrary SQL that may contain multiple queries.
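+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn):
+//
+//	mrr := pgConn.Exec(ctx, "select 1; select 2")
+//	results, err := mrr.ReadAll()
+//	// results holds one *Result per query; err is the first error encountered, if any.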
+func (pgConn *PgConn) Exec(ctx context.Context, sql string) *MultiResultReader {
+ if err := pgConn.lock(); err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: err,
+ }
+ }
+
+ pgConn.multiResultReader = MultiResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ multiResult := &pgConn.multiResultReader
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ multiResult.closed = true
+ multiResult.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return multiResult
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ pgConn.contextWatcher.Unwatch()
+ multiResult.closed = true
+ multiResult.err = err
+ pgConn.unlock()
+ return multiResult
+ }
+
+ return multiResult
+}
+
+// ExecParams executes a command via the PostgreSQL extended query protocol.
+//
+// sql is a SQL command string. It may only contain one query. Parameter substitution is positional using $1, $2, $3,
+// etc.
+//
+// paramValues are the parameter values. They must be encoded in the format given by paramFormats.
+//
+// paramOIDs is a slice of data type OIDs for paramValues. If paramOIDs is nil, the server will infer the data type for
+// all parameters. Any paramOID element that is 0 will cause the server to infer the data type for that parameter.
+// ExecParams will panic if len(paramOIDs) is not 0, 1, or len(paramValues).
+//
+// paramFormats is a slice of format codes determining for each paramValue column whether it is encoded in text or
+// binary format. If paramFormats is nil all params are text format. ExecParams will panic if
+// len(paramFormats) is not 0, 1, or len(paramValues).
+//
+// resultFormats is a slice of format codes determining for each result column whether it is encoded in text or
+// binary format. If resultFormats is nil all results will be in text format.
+//
+// ResultReader must be closed before PgConn can be used again.
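+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn; the query is illustrative):
+//
+//	rr := pgConn.ExecParams(ctx, "select $1::text", [][]byte{[]byte("hello")}, nil, nil, nil)
+//	result := rr.Read() // Read drains the rows and closes the ResultReader
+//	// result.Rows holds the returned values; result.Err holds any error.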
+func (pgConn *PgConn) ExecParams(ctx context.Context, sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) *ResultReader {
+ result := pgConn.execExtendedPrefix(ctx, paramValues)
+ if result.closed {
+ return result
+ }
+
+ pgConn.frontend.SendParse(&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs})
+ pgConn.frontend.SendBind(&pgproto3.Bind{ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+
+ pgConn.execExtendedSuffix(result)
+
+ return result
+}
+
+// ExecPrepared enqueues the execution of a prepared statement via the PostgreSQL extended query protocol.
+//
+// paramValues are the parameter values. They must be encoded in the format given by paramFormats.
+//
+// paramFormats is a slice of format codes determining for each paramValue column whether it is encoded in text or
+// binary format. If paramFormats is nil all params are text format. ExecPrepared will panic if
+// len(paramFormats) is not 0, 1, or len(paramValues).
+//
+// resultFormats is a slice of format codes determining for each result column whether it is encoded in text or
+// binary format. If resultFormats is nil all results will be in text format.
+//
+// ResultReader must be closed before PgConn can be used again.
+func (pgConn *PgConn) ExecPrepared(ctx context.Context, stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) *ResultReader {
+ result := pgConn.execExtendedPrefix(ctx, paramValues)
+ if result.closed {
+ return result
+ }
+
+ pgConn.frontend.SendBind(&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+
+ pgConn.execExtendedSuffix(result)
+
+ return result
+}
+
+func (pgConn *PgConn) execExtendedPrefix(ctx context.Context, paramValues [][]byte) *ResultReader {
+ pgConn.resultReader = ResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ result := &pgConn.resultReader
+
+ if err := pgConn.lock(); err != nil {
+ result.concludeCommand(CommandTag{}, err)
+ result.closed = true
+ return result
+ }
+
+ if len(paramValues) > math.MaxUint16 {
+ result.concludeCommand(CommandTag{}, fmt.Errorf("extended protocol limited to %v parameters", math.MaxUint16))
+ result.closed = true
+ pgConn.unlock()
+ return result
+ }
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ result.concludeCommand(CommandTag{}, newContextAlreadyDoneError(ctx))
+ result.closed = true
+ pgConn.unlock()
+ return result
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ return result
+}
+
+func (pgConn *PgConn) execExtendedSuffix(result *ResultReader) {
+ pgConn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ pgConn.frontend.SendExecute(&pgproto3.Execute{})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ result.concludeCommand(CommandTag{}, err)
+ pgConn.contextWatcher.Unwatch()
+ result.closed = true
+ pgConn.unlock()
+ return
+ }
+
+ result.readUntilRowDescription()
+}
+
+// CopyTo executes the copy command sql and copies the results to w.
+func (pgConn *PgConn) CopyTo(ctx context.Context, w io.Writer, sql string) (CommandTag, error) {
+ if err := pgConn.lock(); err != nil {
+ return CommandTag{}, err
+ }
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ pgConn.unlock()
+ return CommandTag{}, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Send copy to command
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ pgConn.unlock()
+ return CommandTag{}, err
+ }
+
+ // Read results
+ var commandTag CommandTag
+ var pgErr error
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CopyDone:
+ case *pgproto3.CopyData:
+ _, err := w.Write(msg.Data)
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+ case *pgproto3.ReadyForQuery:
+ pgConn.unlock()
+ return commandTag, pgErr
+ case *pgproto3.CommandComplete:
+ commandTag = pgConn.makeCommandTag(msg.CommandTag)
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ }
+ }
+}
+
+// CopyFrom executes the copy command sql and copies all of r to the PostgreSQL server.
+//
+// Note: context cancellation will only interrupt operations on the underlying PostgreSQL network connection. Reads on r
+// could still block.
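+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn; the table and data are illustrative):
+//
+//	data := strings.NewReader("1\talice\n2\tbob\n")
+//	tag, err := pgConn.CopyFrom(ctx, data, "copy people (id, name) from stdin")
+//	// When err is nil, tag.RowsAffected() reports the number of rows copied.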
+func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (CommandTag, error) {
+ if err := pgConn.lock(); err != nil {
+ return CommandTag{}, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return CommandTag{}, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Send copy from query
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+
+ // Send copy data
+ abortCopyChan := make(chan struct{})
+ copyErrChan := make(chan error, 1)
+ signalMessageChan := pgConn.signalMessage()
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ buf := iobufpool.Get(65536)
+ defer iobufpool.Put(buf)
+ (*buf)[0] = 'd'
+
+ for {
+ n, readErr := r.Read((*buf)[5:cap(*buf)])
+ if n > 0 {
+ *buf = (*buf)[0 : n+5]
+ pgio.SetInt32((*buf)[1:], int32(n+4))
+
+ writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(*buf)
+ if writeErr != nil {
+ // Write errors are always fatal, but we can't use asyncClose because we are in a different goroutine. Not
+ // setting pgConn.status or closing pgConn.cleanupDone for the same reason.
+ pgConn.conn.Close()
+
+ copyErrChan <- writeErr
+ return
+ }
+ }
+ if readErr != nil {
+ copyErrChan <- readErr
+ return
+ }
+
+ select {
+ case <-abortCopyChan:
+ return
+ default:
+ }
+ }
+ }()
+
+ var pgErr error
+ var copyErr error
+ for copyErr == nil && pgErr == nil {
+ select {
+ case copyErr = <-copyErrChan:
+ case <-signalMessageChan:
+ // If pgConn.receiveMessage encounters an error it will call pgConn.asyncClose. But that is a race condition with
+ // the goroutine. So instead check pgConn.bufferingReceiveErr which will have been set by the signalMessage. If an
+ // error is found then forcibly close the connection without sending the Terminate message.
+ if err := pgConn.bufferingReceiveErr; err != nil {
+ pgConn.status = connStatusClosed
+ pgConn.conn.Close()
+ close(pgConn.cleanupDone)
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+ msg, _ := pgConn.receiveMessage()
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ default:
+ signalMessageChan = pgConn.signalMessage()
+ }
+ }
+ }
+ close(abortCopyChan)
+ // Make sure io goroutine finishes before writing.
+ wg.Wait()
+
+ if copyErr == io.EOF || pgErr != nil {
+ pgConn.frontend.Send(&pgproto3.CopyDone{})
+ } else {
+ pgConn.frontend.Send(&pgproto3.CopyFail{Message: copyErr.Error()})
+ }
+ err = pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+
+ // Read results
+ var commandTag CommandTag
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ return commandTag, pgErr
+ case *pgproto3.CommandComplete:
+ commandTag = pgConn.makeCommandTag(msg.CommandTag)
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ }
+ }
+}
+
+// MultiResultReader is a reader for a command that could return multiple results such as Exec or ExecBatch.
+type MultiResultReader struct {
+ pgConn *PgConn
+ ctx context.Context
+
+ rr *ResultReader
+
+ closed bool
+ err error
+}
+
+// ReadAll reads all available results. Calling ReadAll is mutually exclusive with all other MultiResultReader methods.
+func (mrr *MultiResultReader) ReadAll() ([]*Result, error) {
+ var results []*Result
+
+ for mrr.NextResult() {
+ results = append(results, mrr.ResultReader().Read())
+ }
+ err := mrr.Close()
+
+ return results, err
+}
+
+func (mrr *MultiResultReader) receiveMessage() (pgproto3.BackendMessage, error) {
+ msg, err := mrr.pgConn.receiveMessage()
+ if err != nil {
+ mrr.pgConn.contextWatcher.Unwatch()
+ mrr.err = normalizeTimeoutError(mrr.ctx, err)
+ mrr.closed = true
+ mrr.pgConn.asyncClose()
+ return nil, mrr.err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ mrr.closed = true
+ mrr.pgConn.contextWatcher.Unwatch()
+ mrr.pgConn.unlock()
+ case *pgproto3.ErrorResponse:
+ mrr.err = ErrorResponseToPgError(msg)
+ }
+
+ return msg, nil
+}
+
+// NextResult advances the MultiResultReader to the next result and returns true if a result is available.
+func (mrr *MultiResultReader) NextResult() bool {
+ for !mrr.closed && mrr.err == nil {
+ msg, err := mrr.receiveMessage()
+ if err != nil {
+ return false
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ mrr.pgConn.resultReader = ResultReader{
+ pgConn: mrr.pgConn,
+ multiResultReader: mrr,
+ ctx: mrr.ctx,
+ fieldDescriptions: mrr.pgConn.convertRowDescription(mrr.pgConn.fieldDescriptions[:], msg),
+ }
+
+ mrr.rr = &mrr.pgConn.resultReader
+ return true
+ case *pgproto3.CommandComplete:
+ mrr.pgConn.resultReader = ResultReader{
+ commandTag: mrr.pgConn.makeCommandTag(msg.CommandTag),
+ commandConcluded: true,
+ closed: true,
+ }
+ mrr.rr = &mrr.pgConn.resultReader
+ return true
+ case *pgproto3.EmptyQueryResponse:
+ return false
+ }
+ }
+
+ return false
+}
+
+// ResultReader returns the current ResultReader.
+func (mrr *MultiResultReader) ResultReader() *ResultReader {
+ return mrr.rr
+}
+
+// Close closes the MultiResultReader and returns the first error that occurred during the MultiResultReader's use.
+func (mrr *MultiResultReader) Close() error {
+ for !mrr.closed {
+ _, err := mrr.receiveMessage()
+ if err != nil {
+ return mrr.err
+ }
+ }
+
+ return mrr.err
+}
+
+// ResultReader is a reader for the result of a single query.
+type ResultReader struct {
+ pgConn *PgConn
+ multiResultReader *MultiResultReader
+ pipeline *Pipeline
+ ctx context.Context
+
+ fieldDescriptions []FieldDescription
+ rowValues [][]byte
+ commandTag CommandTag
+ commandConcluded bool
+ closed bool
+ err error
+}
+
+// Result is the saved query response that is returned by calling Read on a ResultReader.
+type Result struct {
+ FieldDescriptions []FieldDescription
+ Rows [][][]byte
+ CommandTag CommandTag
+ Err error
+}
+
+// Read saves the query response to a Result.
+func (rr *ResultReader) Read() *Result {
+ br := &Result{}
+
+ for rr.NextRow() {
+ if br.FieldDescriptions == nil {
+ br.FieldDescriptions = make([]FieldDescription, len(rr.FieldDescriptions()))
+ copy(br.FieldDescriptions, rr.FieldDescriptions())
+ }
+
+ values := rr.Values()
+ row := make([][]byte, len(values))
+ for i := range row {
+ if values[i] != nil {
+ row[i] = make([]byte, len(values[i]))
+ copy(row[i], values[i])
+ }
+ }
+ br.Rows = append(br.Rows, row)
+ }
+
+ br.CommandTag, br.Err = rr.Close()
+
+ return br
+}
+
+// NextRow advances the ResultReader to the next row and returns true if a row is available.
+func (rr *ResultReader) NextRow() bool {
+ for !rr.commandConcluded {
+ msg, err := rr.receiveMessage()
+ if err != nil {
+ return false
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.DataRow:
+ rr.rowValues = msg.Values
+ return true
+ }
+ }
+
+ return false
+}
+
+// FieldDescriptions returns the field descriptions for the current result set. The returned slice is only valid until
+// the ResultReader is closed. It may return nil (for example, if the query did not return a result set or an error was
+// encountered).
+func (rr *ResultReader) FieldDescriptions() []FieldDescription {
+ return rr.fieldDescriptions
+}
+
+// Values returns the current row data. NextRow must have previously been called. The returned [][]byte is only
+// valid until the next NextRow call or the ResultReader is closed.
+func (rr *ResultReader) Values() [][]byte {
+ return rr.rowValues
+}
+
+// Close consumes any remaining result data and returns the command tag or
+// error.
+func (rr *ResultReader) Close() (CommandTag, error) {
+ if rr.closed {
+ return rr.commandTag, rr.err
+ }
+ rr.closed = true
+
+ for !rr.commandConcluded {
+ _, err := rr.receiveMessage()
+ if err != nil {
+ return CommandTag{}, rr.err
+ }
+ }
+
+ if rr.multiResultReader == nil && rr.pipeline == nil {
+ for {
+ msg, err := rr.receiveMessage()
+ if err != nil {
+ return CommandTag{}, rr.err
+ }
+
+ switch msg := msg.(type) {
+ // Detect a deferred constraint violation where the ErrorResponse is sent after CommandComplete.
+ case *pgproto3.ErrorResponse:
+ rr.err = ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ rr.pgConn.contextWatcher.Unwatch()
+ rr.pgConn.unlock()
+ return rr.commandTag, rr.err
+ }
+ }
+ }
+
+ return rr.commandTag, rr.err
+}
+
+// readUntilRowDescription ensures the ResultReader's fieldDescriptions are loaded. It does not return an error as any
+// error will be stored in the ResultReader.
+func (rr *ResultReader) readUntilRowDescription() {
+ for !rr.commandConcluded {
+		// Peek before receive to avoid consuming a DataRow if the result set does not include a RowDescription message.
+ // This should never happen under normal pgconn usage, but it is possible if SendBytes and ReceiveResults are
+ // manually used to construct a query that does not issue a describe statement.
+ msg, _ := rr.pgConn.peekMessage()
+ if _, ok := msg.(*pgproto3.DataRow); ok {
+ return
+ }
+
+ // Consume the message
+ msg, _ = rr.receiveMessage()
+ if _, ok := msg.(*pgproto3.RowDescription); ok {
+ return
+ }
+ }
+}
+
+func (rr *ResultReader) receiveMessage() (msg pgproto3.BackendMessage, err error) {
+ if rr.multiResultReader == nil {
+ msg, err = rr.pgConn.receiveMessage()
+ } else {
+ msg, err = rr.multiResultReader.receiveMessage()
+ }
+
+ if err != nil {
+ err = normalizeTimeoutError(rr.ctx, err)
+ rr.concludeCommand(CommandTag{}, err)
+ rr.pgConn.contextWatcher.Unwatch()
+ rr.closed = true
+ if rr.multiResultReader == nil {
+ rr.pgConn.asyncClose()
+ }
+
+ return nil, rr.err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rr.fieldDescriptions = rr.pgConn.convertRowDescription(rr.pgConn.fieldDescriptions[:], msg)
+ case *pgproto3.CommandComplete:
+ rr.concludeCommand(rr.pgConn.makeCommandTag(msg.CommandTag), nil)
+ case *pgproto3.EmptyQueryResponse:
+ rr.concludeCommand(CommandTag{}, nil)
+ case *pgproto3.ErrorResponse:
+ pgErr := ErrorResponseToPgError(msg)
+ if rr.pipeline != nil {
+ rr.pipeline.state.HandleError(pgErr)
+ }
+ rr.concludeCommand(CommandTag{}, pgErr)
+ }
+
+ return msg, nil
+}
+
+func (rr *ResultReader) concludeCommand(commandTag CommandTag, err error) {
+ // Keep the first error that is recorded. Store the error before checking if the command is already concluded to
+ // allow for receiving an error after CommandComplete but before ReadyForQuery.
+ if err != nil && rr.err == nil {
+ rr.err = err
+ }
+
+ if rr.commandConcluded {
+ return
+ }
+
+ rr.commandTag = commandTag
+ rr.rowValues = nil
+ rr.commandConcluded = true
+}
+
+// Batch is a collection of queries that can be sent to the PostgreSQL server in a single round-trip.
+type Batch struct {
+ buf []byte
+ err error
+}
+
+// ExecParams appends an ExecParams command to the batch. See PgConn.ExecParams for parameter descriptions.
+func (batch *Batch) ExecParams(sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) {
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+ batch.ExecPrepared("", paramValues, paramFormats, resultFormats)
+}
+
+// ExecPrepared appends an ExecPrepared command to the batch. See PgConn.ExecPrepared for parameter descriptions.
+func (batch *Batch) ExecPrepared(stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) {
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Describe{ObjectType: 'P'}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Execute{}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+}
+
+// ExecBatch executes all the queries in batch in a single round-trip. Execution is implicitly transactional unless a
+// transaction is already in progress or SQL contains transaction control statements. This is a simpler way of executing
+// multiple queries in a single round trip than using pipeline mode.
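+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn; the SQL is illustrative):
+//
+//	batch := &Batch{}
+//	batch.ExecParams("insert into t (a) values ($1)", [][]byte{[]byte("1")}, nil, nil, nil)
+//	batch.ExecParams("insert into t (a) values ($1)", [][]byte{[]byte("2")}, nil, nil, nil)
+//	results, err := pgConn.ExecBatch(ctx, batch).ReadAll()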
+func (pgConn *PgConn) ExecBatch(ctx context.Context, batch *Batch) *MultiResultReader {
+ if batch.err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: batch.err,
+ }
+ }
+
+ if err := pgConn.lock(); err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: err,
+ }
+ }
+
+ pgConn.multiResultReader = MultiResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ multiResult := &pgConn.multiResultReader
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ multiResult.closed = true
+ multiResult.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return multiResult
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ batch.buf, batch.err = (&pgproto3.Sync{}).Encode(batch.buf)
+ if batch.err != nil {
+ pgConn.contextWatcher.Unwatch()
+ multiResult.err = normalizeTimeoutError(multiResult.ctx, batch.err)
+ multiResult.closed = true
+ pgConn.asyncClose()
+ return multiResult
+ }
+
+ pgConn.enterPotentialWriteReadDeadlock()
+ defer pgConn.exitPotentialWriteReadDeadlock()
+ _, err := pgConn.conn.Write(batch.buf)
+ if err != nil {
+ pgConn.contextWatcher.Unwatch()
+ multiResult.err = normalizeTimeoutError(multiResult.ctx, err)
+ multiResult.closed = true
+ pgConn.asyncClose()
+ return multiResult
+ }
+
+ return multiResult
+}
+
+// EscapeString escapes a string such that it can safely be interpolated into a SQL command string. It does not include
+// the surrounding single quotes.
+//
+// The current implementation requires that standard_conforming_strings=on and client_encoding="UTF8". If these
+// conditions are not met an error will be returned. It is possible these restrictions will be lifted in the future.
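+//
+// For example, when these conditions are met, EscapeString("O'Reilly") returns "O''Reilly".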
+func (pgConn *PgConn) EscapeString(s string) (string, error) {
+ if pgConn.ParameterStatus("standard_conforming_strings") != "on" {
+ return "", errors.New("EscapeString must be run with standard_conforming_strings=on")
+ }
+
+ if pgConn.ParameterStatus("client_encoding") != "UTF8" {
+ return "", errors.New("EscapeString must be run with client_encoding=UTF8")
+ }
+
+ return strings.Replace(s, "'", "''", -1), nil
+}
+
+// CheckConn checks the underlying connection without writing any bytes. This is currently implemented by doing a read
+// with a very short deadline. This can be useful because a TCP connection can be broken such that a write will appear
+// to succeed even though it will never actually reach the server. Reading immediately before a write will detect this
+// condition. If this is done immediately before sending a query it reduces the chances a query will be sent that fails
+// without the client knowing whether the server received it or not.
+//
+// Deprecated: CheckConn is deprecated in favor of Ping. CheckConn cannot detect all types of broken connections where
+// the write would still appear to succeed. Prefer Ping unless on a high latency connection.
+func (pgConn *PgConn) CheckConn() error {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
+ defer cancel()
+
+ _, err := pgConn.ReceiveMessage(ctx)
+ if err != nil {
+ if !Timeout(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Ping pings the server. This can be useful because a TCP connection can be broken such that a write will appear to
+// succeed even though it will never actually reach the server. Pinging immediately before sending a query reduces the
+// chances a query will be sent that fails without the client knowing whether the server received it or not.
+func (pgConn *PgConn) Ping(ctx context.Context) error {
+ return pgConn.Exec(ctx, "-- ping").Close()
+}
+
+// makeCommandTag makes a CommandTag. It does not retain a reference to buf or buf's underlying memory.
+func (pgConn *PgConn) makeCommandTag(buf []byte) CommandTag {
+ return CommandTag{s: string(buf)}
+}
+
+// enterPotentialWriteReadDeadlock must be called before a write that could deadlock if the server is simultaneously
+// blocked writing to us.
+func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
+ // The time to wait is somewhat arbitrary. A Write should only take as long as the syscall and memcpy to the OS
+ // outbound network buffer unless the buffer is full (which potentially is a block). It needs to be long enough for
+ // the normal case, but short enough not to kill performance if a block occurs.
+ //
+ // In addition, on Windows the default timer resolution is 15.6ms. So setting the timer to less than that is
+ // ineffective.
+ if pgConn.slowWriteTimer.Reset(15 * time.Millisecond) {
+ panic("BUG: slow write timer already active")
+ }
+}
+
+// exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
+func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
+ if !pgConn.slowWriteTimer.Stop() {
+ // The timer starts its function in a separate goroutine. It is necessary to ensure the background reader has
+ // started before calling Stop. Otherwise, the background reader may not be stopped. That on its own is not a
+ // serious problem. But what is a serious problem is that the background reader may start at an inopportune time in
+ // a subsequent query. For example, if a subsequent query was canceled then a deadline may be set on the net.Conn to
+ // interrupt an in-progress read. After the read is interrupted, but before the deadline is cleared, the background
+		// reader could start and read a deadline error. Then the next query would receive an unexpected deadline error.
+ <-pgConn.bgReaderStarted
+ pgConn.bgReader.Stop()
+ }
+}
+
+func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
+ pgConn.enterPotentialWriteReadDeadlock()
+ defer pgConn.exitPotentialWriteReadDeadlock()
+ err := pgConn.frontend.Flush()
+ return err
+}
+
+// SyncConn prepares the underlying net.Conn for direct use. PgConn may internally buffer reads or use goroutines for
+// background IO. This means that any direct use of the underlying net.Conn may be corrupted if a read is already
+// buffered or a read is in progress. SyncConn drains read buffers and stops background IO. In some cases this may
+// require sending a ping to the server. ctx can be used to cancel this operation. This should be called before any
+// operation that will use the underlying net.Conn directly. e.g. Before Conn() or Hijack().
+//
+// This should not be confused with the PostgreSQL protocol Sync message.
+func (pgConn *PgConn) SyncConn(ctx context.Context) error {
+ for i := 0; i < 10; i++ {
+ if pgConn.bgReader.Status() == bgreader.StatusStopped && pgConn.frontend.ReadBufferLen() == 0 {
+ return nil
+ }
+
+ err := pgConn.Ping(ctx)
+ if err != nil {
+ return fmt.Errorf("SyncConn: Ping failed while syncing conn: %w", err)
+ }
+ }
+
+	// This should never happen. The only way I can imagine this occurring is if the server is constantly sending data such as
+	// LISTEN/NOTIFY or log notifications such that we can never get an empty buffer.
+ return errors.New("SyncConn: conn never synchronized")
+}
+
+// CustomData returns a map that can be used to associate custom data with the connection.
+func (pgConn *PgConn) CustomData() map[string]any {
+ return pgConn.customData
+}
+
+// HijackedConn is the result of hijacking a connection.
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
+// compatibility guarantee.
+type HijackedConn struct {
+ Conn net.Conn
+ PID uint32 // backend pid
+ SecretKey uint32 // key to use to send a cancel query message to the server
+ ParameterStatuses map[string]string // parameters that have been reported by the server
+ TxStatus byte
+ Frontend *pgproto3.Frontend
+ Config *Config
+ CustomData map[string]any
+}
+
+// Hijack extracts the internal connection data. pgConn must be in an idle state. SyncConn should be called immediately
+// before Hijack. pgConn is unusable after hijacking. Hijacking is typically only useful when using pgconn to establish
+// a connection, but taking complete control of the raw connection after that (e.g. a load balancer or proxy).
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
+// compatibility guarantee.
+func (pgConn *PgConn) Hijack() (*HijackedConn, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ pgConn.status = connStatusClosed
+
+ return &HijackedConn{
+ Conn: pgConn.conn,
+ PID: pgConn.pid,
+ SecretKey: pgConn.secretKey,
+ ParameterStatuses: pgConn.parameterStatuses,
+ TxStatus: pgConn.txStatus,
+ Frontend: pgConn.frontend,
+ Config: pgConn.config,
+ CustomData: pgConn.customData,
+ }, nil
+}
+
+// Construct creates a PgConn from an already established connection to a PostgreSQL server. This is the inverse of
+// PgConn.Hijack. The connection must be in an idle state.
+//
+// hc.Frontend is replaced by a new pgproto3.Frontend built by hc.Config.BuildFrontend.
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
+// compatibility guarantee.
+func Construct(hc *HijackedConn) (*PgConn, error) {
+ pgConn := &PgConn{
+ conn: hc.Conn,
+ pid: hc.PID,
+ secretKey: hc.SecretKey,
+ parameterStatuses: hc.ParameterStatuses,
+ txStatus: hc.TxStatus,
+ frontend: hc.Frontend,
+ config: hc.Config,
+ customData: hc.CustomData,
+
+ status: connStatusIdle,
+
+ cleanupDone: make(chan struct{}),
+ }
+
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(hc.Config.BuildContextWatcherHandler(pgConn))
+ pgConn.bgReader = bgreader.New(pgConn.conn)
+ pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
+ func() {
+ pgConn.bgReader.Start()
+ pgConn.bgReaderStarted <- struct{}{}
+ },
+ )
+ pgConn.slowWriteTimer.Stop()
+ pgConn.bgReaderStarted = make(chan struct{})
+ pgConn.frontend = hc.Config.BuildFrontend(pgConn.bgReader, pgConn.conn)
+
+ return pgConn, nil
+}
+
+// Pipeline represents a connection in pipeline mode.
+//
+// SendPrepare, SendQueryParams, and SendQueryPrepared queue requests to the server. These requests are not written until
+// the pipeline is flushed by Flush or Sync. Sync must be called after the last request is queued. Requests between
+// synchronization points are implicitly transactional unless explicit transaction control statements have been issued.
+//
+// The context the pipeline was started with is in effect for the entire life of the Pipeline.
+//
+// For a deeper understanding of pipeline mode see the PostgreSQL documentation for the extended query protocol
+// (https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY) and the libpq pipeline mode
+// (https://www.postgresql.org/docs/current/libpq-pipeline-mode.html).
+type Pipeline struct {
+ conn *PgConn
+ ctx context.Context
+
+ state pipelineState
+ err error
+ closed bool
+}
+
+// PipelineSync is returned by GetResults when a ReadyForQuery message is received.
+type PipelineSync struct{}
+
+// CloseComplete is returned by GetResults when a CloseComplete message is received.
+type CloseComplete struct{}
+
+type pipelineRequestType int
+
+const (
+ pipelineNil pipelineRequestType = iota
+ pipelinePrepare
+ pipelineQueryParams
+ pipelineQueryPrepared
+ pipelineDeallocate
+ pipelineSyncRequest
+ pipelineFlushRequest
+)
+
+type pipelineRequestEvent struct {
+ RequestType pipelineRequestType
+ WasSentToServer bool
+ BeforeFlushOrSync bool
+}
+
+type pipelineState struct {
+ requestEventQueue list.List
+ lastRequestType pipelineRequestType
+ pgErr *PgError
+ expectedReadyForQueryCount int
+}
+
+func (s *pipelineState) Init() {
+ s.requestEventQueue.Init()
+ s.lastRequestType = pipelineNil
+}
+
+func (s *pipelineState) RegisterSendingToServer() {
+ for elem := s.requestEventQueue.Back(); elem != nil; elem = elem.Prev() {
+ val := elem.Value.(pipelineRequestEvent)
+ if val.WasSentToServer {
+ return
+ }
+ val.WasSentToServer = true
+ elem.Value = val
+ }
+}
+
+func (s *pipelineState) registerFlushingBufferOnServer() {
+ for elem := s.requestEventQueue.Back(); elem != nil; elem = elem.Prev() {
+ val := elem.Value.(pipelineRequestEvent)
+ if val.BeforeFlushOrSync {
+ return
+ }
+ val.BeforeFlushOrSync = true
+ elem.Value = val
+ }
+}
+
+func (s *pipelineState) PushBackRequestType(req pipelineRequestType) {
+ if req == pipelineNil {
+ return
+ }
+
+ if req != pipelineFlushRequest {
+ s.requestEventQueue.PushBack(pipelineRequestEvent{RequestType: req})
+ }
+ if req == pipelineFlushRequest || req == pipelineSyncRequest {
+ s.registerFlushingBufferOnServer()
+ }
+ s.lastRequestType = req
+
+ if req == pipelineSyncRequest {
+ s.expectedReadyForQueryCount++
+ }
+}
+
+func (s *pipelineState) ExtractFrontRequestType() pipelineRequestType {
+ for {
+ elem := s.requestEventQueue.Front()
+ if elem == nil {
+ return pipelineNil
+ }
+ val := elem.Value.(pipelineRequestEvent)
+ if !(val.WasSentToServer && val.BeforeFlushOrSync) {
+ return pipelineNil
+ }
+
+ s.requestEventQueue.Remove(elem)
+ if val.RequestType == pipelineSyncRequest {
+ s.pgErr = nil
+ }
+ if s.pgErr == nil {
+ return val.RequestType
+ }
+ }
+}
+
+func (s *pipelineState) HandleError(err *PgError) {
+ s.pgErr = err
+}
+
+func (s *pipelineState) HandleReadyForQuery() {
+ s.expectedReadyForQueryCount--
+}
+
+func (s *pipelineState) PendingSync() bool {
+ var notPendingSync bool
+
+ if elem := s.requestEventQueue.Back(); elem != nil {
+ val := elem.Value.(pipelineRequestEvent)
+ notPendingSync = (val.RequestType == pipelineSyncRequest) && val.WasSentToServer
+ } else {
+ notPendingSync = (s.lastRequestType == pipelineSyncRequest) || (s.lastRequestType == pipelineNil)
+ }
+
+ return !notPendingSync
+}
+
+func (s *pipelineState) ExpectedReadyForQuery() int {
+ return s.expectedReadyForQueryCount
+}
+
+// StartPipeline switches the connection to pipeline mode and returns a *Pipeline. In pipeline mode requests can be sent
+// to the server without waiting for a response. Close must be called on the returned *Pipeline to return the connection
+// to normal mode. While in pipeline mode, no methods that communicate with the server may be called except
+// CancelRequest and Close. ctx is in effect for the entire life of the *Pipeline.
+//
+// Prefer ExecBatch when only sending one group of queries at once.
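+//
+// A minimal usage sketch (assuming pgConn is an established *PgConn; the SQL is illustrative):
+//
+//	pipeline := pgConn.StartPipeline(ctx)
+//	pipeline.SendQueryParams("select 1", nil, nil, nil, nil)
+//	pipeline.SendQueryParams("select 2", nil, nil, nil, nil)
+//	if err := pipeline.Sync(); err != nil {
+//		// handle error
+//	}
+//	// Call GetResults until it returns a *PipelineSync, then Close the pipeline.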
+func (pgConn *PgConn) StartPipeline(ctx context.Context) *Pipeline {
+ if err := pgConn.lock(); err != nil {
+ pipeline := &Pipeline{
+ closed: true,
+ err: err,
+ }
+ pipeline.state.Init()
+
+ return pipeline
+ }
+
+ pgConn.pipeline = Pipeline{
+ conn: pgConn,
+ ctx: ctx,
+ }
+ pgConn.pipeline.state.Init()
+
+ pipeline := &pgConn.pipeline
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ pipeline.closed = true
+ pipeline.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return pipeline
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ return pipeline
+}
+
+// SendPrepare is the pipeline version of *PgConn.Prepare.
+func (p *Pipeline) SendPrepare(name, sql string, paramOIDs []uint32) {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.SendParse(&pgproto3.Parse{Name: name, Query: sql, ParameterOIDs: paramOIDs})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'S', Name: name})
+ p.state.PushBackRequestType(pipelinePrepare)
+}
+
+// SendDeallocate deallocates a prepared statement.
+func (p *Pipeline) SendDeallocate(name string) {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.SendClose(&pgproto3.Close{ObjectType: 'S', Name: name})
+ p.state.PushBackRequestType(pipelineDeallocate)
+}
+
+// SendQueryParams is the pipeline version of *PgConn.ExecParams.
+func (p *Pipeline) SendQueryParams(sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.SendParse(&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs})
+ p.conn.frontend.SendBind(&pgproto3.Bind{ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ p.conn.frontend.SendExecute(&pgproto3.Execute{})
+ p.state.PushBackRequestType(pipelineQueryParams)
+}
+
+// SendQueryPrepared is the pipeline version of *PgConn.ExecPrepared.
+func (p *Pipeline) SendQueryPrepared(stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.SendBind(&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ p.conn.frontend.SendExecute(&pgproto3.Execute{})
+ p.state.PushBackRequestType(pipelineQueryPrepared)
+}
+
+// SendFlushRequest sends a request for the server to flush its output buffer.
+//
+// The server flushes its output buffer automatically as a result of Sync being called,
+// or on any request when not in pipeline mode; this function is useful to cause the server
+// to flush its output buffer in pipeline mode without establishing a synchronization point.
+// Note that the request is not itself flushed to the server automatically; use Flush if
+// necessary. This copies the behavior of libpq PQsendFlushRequest.
+func (p *Pipeline) SendFlushRequest() {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.Send(&pgproto3.Flush{})
+ p.state.PushBackRequestType(pipelineFlushRequest)
+}
+
+// SendPipelineSync marks a synchronization point in a pipeline by sending a sync message
+// without flushing the send buffer. This serves as the delimiter of an implicit
+// transaction and an error recovery point.
+//
+// Note that the request is not itself flushed to the server automatically; use Flush if
+// necessary. This copies the behavior of libpq PQsendPipelineSync.
+func (p *Pipeline) SendPipelineSync() {
+ if p.closed {
+ return
+ }
+
+ p.conn.frontend.SendSync(&pgproto3.Sync{})
+ p.state.PushBackRequestType(pipelineSyncRequest)
+}
+
+// Flush flushes the queued requests without establishing a synchronization point.
+func (p *Pipeline) Flush() error {
+ if p.closed {
+ if p.err != nil {
+ return p.err
+ }
+ return errors.New("pipeline closed")
+ }
+
+ err := p.conn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ err = normalizeTimeoutError(p.ctx, err)
+
+ p.conn.asyncClose()
+
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+ p.closed = true
+ p.err = err
+ return err
+ }
+
+ p.state.RegisterSendingToServer()
+ return nil
+}
+
+// Sync establishes a synchronization point and flushes the queued requests.
+func (p *Pipeline) Sync() error {
+ p.SendPipelineSync()
+ return p.Flush()
+}
+
+// GetResults gets the next results. If results are present, results may be a *ResultReader, *StatementDescription, or
+// *PipelineSync. If an ErrorResponse is received from the server, results will be nil and err will be a *PgError. If no
+// results are available, results and err will both be nil.
+func (p *Pipeline) GetResults() (results any, err error) {
+ if p.closed {
+ if p.err != nil {
+ return nil, p.err
+ }
+ return nil, errors.New("pipeline closed")
+ }
+
+ if p.state.ExtractFrontRequestType() == pipelineNil {
+ return nil, nil
+ }
+
+ return p.getResults()
+}
+
+func (p *Pipeline) getResults() (results any, err error) {
+ for {
+ msg, err := p.conn.receiveMessage()
+ if err != nil {
+ p.closed = true
+ p.err = err
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ p.conn.resultReader = ResultReader{
+ pgConn: p.conn,
+ pipeline: p,
+ ctx: p.ctx,
+ fieldDescriptions: p.conn.convertRowDescription(p.conn.fieldDescriptions[:], msg),
+ }
+ return &p.conn.resultReader, nil
+ case *pgproto3.CommandComplete:
+ p.conn.resultReader = ResultReader{
+ commandTag: p.conn.makeCommandTag(msg.CommandTag),
+ commandConcluded: true,
+ closed: true,
+ }
+ return &p.conn.resultReader, nil
+ case *pgproto3.ParseComplete:
+ peekedMsg, err := p.conn.peekMessage()
+ if err != nil {
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+ if _, ok := peekedMsg.(*pgproto3.ParameterDescription); ok {
+ return p.getResultsPrepare()
+ }
+ case *pgproto3.CloseComplete:
+ return &CloseComplete{}, nil
+ case *pgproto3.ReadyForQuery:
+ p.state.HandleReadyForQuery()
+ return &PipelineSync{}, nil
+ case *pgproto3.ErrorResponse:
+ pgErr := ErrorResponseToPgError(msg)
+ p.state.HandleError(pgErr)
+ return nil, pgErr
+ }
+ }
+}
+
+func (p *Pipeline) getResultsPrepare() (*StatementDescription, error) {
+ psd := &StatementDescription{}
+
+ for {
+ msg, err := p.conn.receiveMessage()
+ if err != nil {
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ psd.ParamOIDs = make([]uint32, len(msg.ParameterOIDs))
+ copy(psd.ParamOIDs, msg.ParameterOIDs)
+ case *pgproto3.RowDescription:
+ psd.Fields = p.conn.convertRowDescription(nil, msg)
+ return psd, nil
+
+ // NoData is returned instead of RowDescription when there is no expected result. e.g. An INSERT without a RETURNING
+ // clause.
+ case *pgproto3.NoData:
+ return psd, nil
+
+ // These should never happen here. But don't take chances that could lead to a deadlock.
+ case *pgproto3.ErrorResponse:
+ pgErr := ErrorResponseToPgError(msg)
+ p.state.HandleError(pgErr)
+ return nil, pgErr
+ case *pgproto3.CommandComplete:
+ p.conn.asyncClose()
+ return nil, errors.New("BUG: received CommandComplete while handling Describe")
+ case *pgproto3.ReadyForQuery:
+ p.conn.asyncClose()
+ return nil, errors.New("BUG: received ReadyForQuery while handling Describe")
+ }
+ }
+}
+
+// Close closes the pipeline and returns the connection to normal mode.
+func (p *Pipeline) Close() error {
+ if p.closed {
+ return p.err
+ }
+
+ p.closed = true
+
+ if p.state.PendingSync() {
+ p.conn.asyncClose()
+ p.err = errors.New("pipeline has unsynced requests")
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+
+ return p.err
+ }
+
+ for p.state.ExpectedReadyForQuery() > 0 {
+ _, err := p.getResults()
+ if err != nil {
+ p.err = err
+ var pgErr *PgError
+ if !errors.As(err, &pgErr) {
+ p.conn.asyncClose()
+ break
+ }
+ }
+ }
+
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+
+ return p.err
+}
+
+// DeadlineContextWatcherHandler handles canceled contexts by setting a deadline on a net.Conn.
+type DeadlineContextWatcherHandler struct {
+ Conn net.Conn
+
+	// DeadlineDelay is added to the current time to compute the deadline set on the net.Conn when the context is canceled.
+ DeadlineDelay time.Duration
+}
+
+func (h *DeadlineContextWatcherHandler) HandleCancel(ctx context.Context) {
+ h.Conn.SetDeadline(time.Now().Add(h.DeadlineDelay))
+}
+
+func (h *DeadlineContextWatcherHandler) HandleUnwatchAfterCancel() {
+ h.Conn.SetDeadline(time.Time{})
+}
+
+// CancelRequestContextWatcherHandler handles canceled contexts by sending a cancel request to the server. It also sets
+// a deadline on a net.Conn as a fallback.
+type CancelRequestContextWatcherHandler struct {
+ Conn *PgConn
+
+ // CancelRequestDelay is the delay before sending the cancel request to the server.
+ CancelRequestDelay time.Duration
+
+	// DeadlineDelay is added to the current time to compute the deadline set on the net.Conn when the context is canceled.
+ DeadlineDelay time.Duration
+
+ cancelFinishedChan chan struct{}
+ handleUnwatchAfterCancelCalled func()
+}
+
+func (h *CancelRequestContextWatcherHandler) HandleCancel(context.Context) {
+ h.cancelFinishedChan = make(chan struct{})
+ var handleUnwatchedAfterCancelCalledCtx context.Context
+ handleUnwatchedAfterCancelCalledCtx, h.handleUnwatchAfterCancelCalled = context.WithCancel(context.Background())
+
+ deadline := time.Now().Add(h.DeadlineDelay)
+ h.Conn.conn.SetDeadline(deadline)
+
+ go func() {
+ defer close(h.cancelFinishedChan)
+
+ select {
+ case <-handleUnwatchedAfterCancelCalledCtx.Done():
+ return
+ case <-time.After(h.CancelRequestDelay):
+ }
+
+ cancelRequestCtx, cancel := context.WithDeadline(handleUnwatchedAfterCancelCalledCtx, deadline)
+ defer cancel()
+ h.Conn.CancelRequest(cancelRequestCtx)
+
+ // CancelRequest is inherently racy. Even though the cancel request has been received by the server at this point,
+ // it hasn't necessarily been delivered to the other connection. If we immediately return and the connection is
+ // immediately used then it is possible the CancelRequest will actually cancel our next query. The
+ // TestCancelRequestContextWatcherHandler Stress test can produce this error without the sleep below. The sleep time
+ // is arbitrary, but should be sufficient to prevent this error case.
+ time.Sleep(100 * time.Millisecond)
+ }()
+}
+
+func (h *CancelRequestContextWatcherHandler) HandleUnwatchAfterCancel() {
+ h.handleUnwatchAfterCancelCalled()
+ <-h.cancelFinishedChan
+
+ h.Conn.conn.SetDeadline(time.Time{})
+}
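
A hedged sketch of the contract these handlers implement: HandleCancel runs when the watched context is canceled, and HandleUnwatchAfterCancel runs once the interrupted operation has finished. Inside pgconn the internal context watcher makes these calls; the manual wiring below only illustrates the sequence.

package pgconnexample

import (
	"context"
	"net"
	"time"

	"github.com/jackc/pgx/v5/pgconn"
)

// interruptibleRead shows the handler sequence: a canceled context sets a short
// deadline so the blocked Read fails promptly, and the deadline is cleared once
// the caller stops watching the context.
func interruptibleRead(ctx context.Context, conn net.Conn, buf []byte) (int, error) {
	h := &pgconn.DeadlineContextWatcherHandler{Conn: conn, DeadlineDelay: time.Second}

	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			h.HandleCancel(ctx) // the deadline makes the Read below return an error
		case <-done:
		}
	}()

	n, err := conn.Read(buf)
	close(done)
	if ctx.Err() != nil {
		h.HandleUnwatchAfterCancel() // clear the deadline so conn stays usable
	}
	return n, err
}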
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/README.md b/vendor/github.com/jackc/pgx/v5/pgproto3/README.md
new file mode 100644
index 0000000..7a26f1c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/README.md
@@ -0,0 +1,7 @@
+# pgproto3
+
+Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
+
+pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.
+
+See example/pgfortune for a playful example of a fake PostgreSQL server.
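
A minimal sketch of the mock-server use case mentioned above; it assumes conn is an accepted net.Conn, skips SSLRequest and CancelRequest handling, and accepts any client without authentication.

package pgproto3example

import (
	"net"

	"github.com/jackc/pgx/v5/pgproto3"
)

// handleConn answers the startup handshake and reports an idle session, the
// smallest exchange a fake server needs before it can read Query messages.
func handleConn(conn net.Conn) error {
	backend := pgproto3.NewBackend(conn, conn)

	if _, err := backend.ReceiveStartupMessage(); err != nil {
		return err
	}

	backend.Send(&pgproto3.AuthenticationOk{})
	backend.Send(&pgproto3.ReadyForQuery{TxStatus: 'I'})
	return backend.Flush()
}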
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go
new file mode 100644
index 0000000..ac2962e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go
@@ -0,0 +1,51 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationCleartextPassword is a message sent from the backend indicating that a clear-text password is required.
+type AuthenticationCleartextPassword struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationCleartextPassword) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationCleartextPassword) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationCleartextPassword) Decode(src []byte) error {
+ if len(src) != 4 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeCleartextPassword {
+ return errors.New("bad auth type")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationCleartextPassword) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeCleartextPassword)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationCleartextPassword) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "AuthenticationCleartextPassword",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go
new file mode 100644
index 0000000..178ef31
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type AuthenticationGSS struct{}
+
+func (a *AuthenticationGSS) Backend() {}
+
+func (a *AuthenticationGSS) AuthenticationResponse() {}
+
+func (a *AuthenticationGSS) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeGSS {
+ return errors.New("bad auth type")
+ }
+ return nil
+}
+
+func (a *AuthenticationGSS) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeGSS)
+ return finishMessage(dst, sp)
+}
+
+func (a *AuthenticationGSS) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "AuthenticationGSS",
+ })
+}
+
+func (a *AuthenticationGSS) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go
new file mode 100644
index 0000000..2ba3f3b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type AuthenticationGSSContinue struct {
+ Data []byte
+}
+
+func (a *AuthenticationGSSContinue) Backend() {}
+
+func (a *AuthenticationGSSContinue) AuthenticationResponse() {}
+
+func (a *AuthenticationGSSContinue) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeGSSCont {
+ return errors.New("bad auth type")
+ }
+
+ a.Data = src[4:]
+ return nil
+}
+
+func (a *AuthenticationGSSContinue) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeGSSCont)
+ dst = append(dst, a.Data...)
+ return finishMessage(dst, sp)
+}
+
+func (a *AuthenticationGSSContinue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "AuthenticationGSSContinue",
+ Data: a.Data,
+ })
+}
+
+func (a *AuthenticationGSSContinue) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Data []byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ a.Data = msg.Data
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go
new file mode 100644
index 0000000..854c640
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go
@@ -0,0 +1,76 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationMD5Password is a message sent from the backend indicating that an MD5 hashed password is required.
+type AuthenticationMD5Password struct {
+ Salt [4]byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationMD5Password) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationMD5Password) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationMD5Password) Decode(src []byte) error {
+ if len(src) != 8 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeMD5Password {
+ return errors.New("bad auth type")
+ }
+
+ copy(dst.Salt[:], src[4:8])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationMD5Password) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeMD5Password)
+ dst = append(dst, src.Salt[:]...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationMD5Password) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Salt [4]byte
+ }{
+ Type: "AuthenticationMD5Password",
+ Salt: src.Salt,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationMD5Password) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Salt [4]byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Salt = msg.Salt
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go
new file mode 100644
index 0000000..ec11d39
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go
@@ -0,0 +1,51 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationOk is a message sent from the backend indicating that authentication was successful.
+type AuthenticationOk struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationOk) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationOk) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationOk) Decode(src []byte) error {
+ if len(src) != 4 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeOk {
+ return errors.New("bad auth type")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationOk) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeOk)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationOk) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "AuthenticationOK",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go
new file mode 100644
index 0000000..e66580f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go
@@ -0,0 +1,72 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASL is a message sent from the backend indicating that SASL authentication is required.
+type AuthenticationSASL struct {
+ AuthMechanisms []string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASL) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASL) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASL) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASL {
+ return errors.New("bad auth type")
+ }
+
+ authMechanisms := src[4:]
+ for len(authMechanisms) > 1 {
+ idx := bytes.IndexByte(authMechanisms, 0)
+ if idx == -1 {
+ return &invalidMessageFormatErr{messageType: "AuthenticationSASL", details: "unterminated string"}
+ }
+ dst.AuthMechanisms = append(dst.AuthMechanisms, string(authMechanisms[:idx]))
+ authMechanisms = authMechanisms[idx+1:]
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASL) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASL)
+
+ for _, s := range src.AuthMechanisms {
+ dst = append(dst, []byte(s)...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASL) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ AuthMechanisms []string
+ }{
+ Type: "AuthenticationSASL",
+ AuthMechanisms: src.AuthMechanisms,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go
new file mode 100644
index 0000000..70fba4a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go
@@ -0,0 +1,75 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASLContinue is a message sent from the backend containing a SASL challenge.
+type AuthenticationSASLContinue struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASLContinue) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASLContinue) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASLContinue) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASLContinue {
+ return errors.New("bad auth type")
+ }
+
+ dst.Data = src[4:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASLContinue) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASLContinue)
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASLContinue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "AuthenticationSASLContinue",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationSASLContinue) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go
new file mode 100644
index 0000000..84976c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go
@@ -0,0 +1,75 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASLFinal is a message sent from the backend indicating a SASL authentication has completed.
+type AuthenticationSASLFinal struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASLFinal) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASLFinal) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASLFinal) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASLFinal {
+ return errors.New("bad auth type")
+ }
+
+ dst.Data = src[4:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASLFinal) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASLFinal)
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASLFinal) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "AuthenticationSASLFinal",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationSASLFinal) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go b/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go
new file mode 100644
index 0000000..28cff04
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go
@@ -0,0 +1,299 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Backend acts as a server for the PostgreSQL wire protocol version 3.
+type Backend struct {
+ cr *chunkReader
+ w io.Writer
+
+ // tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
+ // before it is actually transmitted (i.e. before Flush).
+ tracer *tracer
+
+ wbuf []byte
+ encodeError error
+
+ // Frontend message flyweights
+ bind Bind
+ cancelRequest CancelRequest
+ _close Close
+ copyFail CopyFail
+ copyData CopyData
+ copyDone CopyDone
+ describe Describe
+ execute Execute
+ flush Flush
+ functionCall FunctionCall
+ gssEncRequest GSSEncRequest
+ parse Parse
+ query Query
+ sslRequest SSLRequest
+ startupMessage StartupMessage
+ sync Sync
+ terminate Terminate
+
+ bodyLen int
+ maxBodyLen int // maxBodyLen is the maximum length of a message body in octets. If a message body exceeds this length, Receive will return an error.
+ msgType byte
+ partialMsg bool
+ authType uint32
+}
+
+const (
+ minStartupPacketLen = 4 // minStartupPacketLen is a single 32-bit int version or code.
+ maxStartupPacketLen = 10000 // maxStartupPacketLen is MAX_STARTUP_PACKET_LENGTH from PG source.
+)
+
+// NewBackend creates a new Backend.
+func NewBackend(r io.Reader, w io.Writer) *Backend {
+ cr := newChunkReader(r, 0)
+ return &Backend{cr: cr, w: w}
+}
+
+// Send sends a message to the frontend (i.e. the client). The message is buffered until Flush is called. Any error
+// encountered will be returned from Flush.
+func (b *Backend) Send(msg BackendMessage) {
+ if b.encodeError != nil {
+ return
+ }
+
+ prevLen := len(b.wbuf)
+ newBuf, err := msg.Encode(b.wbuf)
+ if err != nil {
+ b.encodeError = err
+ return
+ }
+ b.wbuf = newBuf
+
+ if b.tracer != nil {
+ b.tracer.traceMessage('B', int32(len(b.wbuf)-prevLen), msg)
+ }
+}
+
+// Flush writes any pending messages to the frontend (i.e. the client).
+func (b *Backend) Flush() error {
+ if err := b.encodeError; err != nil {
+ b.encodeError = nil
+ b.wbuf = b.wbuf[:0]
+ return &writeError{err: err, safeToRetry: true}
+ }
+
+ n, err := b.w.Write(b.wbuf)
+
+ const maxLen = 1024
+ if len(b.wbuf) > maxLen {
+ b.wbuf = make([]byte, 0, maxLen)
+ } else {
+ b.wbuf = b.wbuf[:0]
+ }
+
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ return nil
+}
+
+// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
+// PQtrace.
+func (b *Backend) Trace(w io.Writer, options TracerOptions) {
+ b.tracer = &tracer{
+ w: w,
+ buf: &bytes.Buffer{},
+ TracerOptions: options,
+ }
+}
+
+// Untrace stops tracing.
+func (b *Backend) Untrace() {
+ b.tracer = nil
+}
+
+// ReceiveStartupMessage receives the initial connection message. This method is used instead of the normal Receive method
+// because the initial connection message is "special" and does not include the message type as the first byte. This
+// will return either a StartupMessage, SSLRequest, GSSEncRequest, or CancelRequest.
+func (b *Backend) ReceiveStartupMessage() (FrontendMessage, error) {
+ buf, err := b.cr.Next(4)
+ if err != nil {
+ return nil, err
+ }
+ msgSize := int(binary.BigEndian.Uint32(buf) - 4)
+
+ if msgSize < minStartupPacketLen || msgSize > maxStartupPacketLen {
+ return nil, fmt.Errorf("invalid length of startup packet: %d", msgSize)
+ }
+
+ buf, err = b.cr.Next(msgSize)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ code := binary.BigEndian.Uint32(buf)
+
+ switch code {
+ case ProtocolVersionNumber:
+ err = b.startupMessage.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.startupMessage, nil
+ case sslRequestNumber:
+ err = b.sslRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.sslRequest, nil
+ case cancelRequestCode:
+ err = b.cancelRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.cancelRequest, nil
+ case gssEncReqNumber:
+ err = b.gssEncRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.gssEncRequest, nil
+ default:
+ return nil, fmt.Errorf("unknown startup message code: %d", code)
+ }
+}
+
+// Receive receives a message from the frontend. The returned message is only valid until the next call to Receive.
+func (b *Backend) Receive() (FrontendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ b.msgType = header[0]
+
+ msgLength := int(binary.BigEndian.Uint32(header[1:]))
+ if msgLength < 4 {
+ return nil, fmt.Errorf("invalid message length: %d", msgLength)
+ }
+
+ b.bodyLen = msgLength - 4
+ if b.maxBodyLen > 0 && b.bodyLen > b.maxBodyLen {
+ return nil, &ExceededMaxBodyLenErr{b.maxBodyLen, b.bodyLen}
+ }
+ b.partialMsg = true
+ }
+
+ var msg FrontendMessage
+ switch b.msgType {
+ case 'B':
+ msg = &b.bind
+ case 'C':
+ msg = &b._close
+ case 'D':
+ msg = &b.describe
+ case 'E':
+ msg = &b.execute
+ case 'F':
+ msg = &b.functionCall
+ case 'f':
+ msg = &b.copyFail
+ case 'd':
+ msg = &b.copyData
+ case 'c':
+ msg = &b.copyDone
+ case 'H':
+ msg = &b.flush
+ case 'P':
+ msg = &b.parse
+ case 'p':
+ switch b.authType {
+ case AuthTypeSASL:
+ msg = &SASLInitialResponse{}
+ case AuthTypeSASLContinue:
+ msg = &SASLResponse{}
+ case AuthTypeSASLFinal:
+ msg = &SASLResponse{}
+ case AuthTypeGSS, AuthTypeGSSCont:
+ msg = &GSSResponse{}
+ case AuthTypeCleartextPassword, AuthTypeMD5Password:
+ fallthrough
+ default:
+ // to maintain backwards compatibility
+ msg = &PasswordMessage{}
+ }
+ case 'Q':
+ msg = &b.query
+ case 'S':
+ msg = &b.sync
+ case 'X':
+ msg = &b.terminate
+ default:
+ return nil, fmt.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ if err != nil {
+ return nil, err
+ }
+
+ if b.tracer != nil {
+ b.tracer.traceMessage('F', int32(5+len(msgBody)), msg)
+ }
+
+ return msg, nil
+}
+
+// SetAuthType sets the authentication type in the backend.
+// Since multiple message types can start with 'p', SetAuthType allows
+// contextual identification of FrontendMessages. For example, in the
+// PG message flow documentation for PasswordMessage:
+//
+// Byte1('p')
+//
+// Identifies the message as a password response. Note that this is also used for
+// GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from
+// the context.
+//
+// Since the Frontend does not know about the state of a backend, it is important
+// to call SetAuthType() after an authentication request is received by the Frontend.
+func (b *Backend) SetAuthType(authType uint32) error {
+ switch authType {
+ case AuthTypeOk,
+ AuthTypeCleartextPassword,
+ AuthTypeMD5Password,
+ AuthTypeSCMCreds,
+ AuthTypeGSS,
+ AuthTypeGSSCont,
+ AuthTypeSSPI,
+ AuthTypeSASL,
+ AuthTypeSASLContinue,
+ AuthTypeSASLFinal:
+ b.authType = authType
+ default:
+ return fmt.Errorf("authType not recognized: %d", authType)
+ }
+
+ return nil
+}
+
+// SetMaxBodyLen sets the maximum length of a message body in octets.
+// If a message body exceeds this length, Receive will return an error.
+// This is useful for protecting against malicious clients that send
+// large messages with the intent of causing memory exhaustion.
+// The default value is 0.
+// If maxBodyLen is 0, then no maximum is enforced.
+func (b *Backend) SetMaxBodyLen(maxBodyLen int) {
+ b.maxBodyLen = maxBodyLen
+}
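
A hedged sketch of the flow the SetAuthType comment describes: the server side sends a cleartext-password request, then tells the Backend how to decode the next 'p' message. The be argument is assumed to be a Backend that has already handled the startup message.

package pgproto3example

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgproto3"
)

func requestCleartextPassword(be *pgproto3.Backend) (*pgproto3.PasswordMessage, error) {
	be.SetMaxBodyLen(1 << 20) // cap message bodies at 1 MiB to bound memory use

	be.Send(&pgproto3.AuthenticationCleartextPassword{})
	if err := be.Flush(); err != nil {
		return nil, err
	}

	// Without this, the next 'p' message would fall back to PasswordMessage
	// anyway, but being explicit matches the documented contract.
	if err := be.SetAuthType(pgproto3.AuthTypeCleartextPassword); err != nil {
		return nil, err
	}

	msg, err := be.Receive()
	if err != nil {
		return nil, err
	}
	pw, ok := msg.(*pgproto3.PasswordMessage)
	if !ok {
		return nil, fmt.Errorf("expected PasswordMessage, got %T", msg)
	}
	return pw, nil
}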
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go
new file mode 100644
index 0000000..23f5da6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go
@@ -0,0 +1,50 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BackendKeyData struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*BackendKeyData) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *BackendKeyData) Decode(src []byte) error {
+ if len(src) != 8 {
+ return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[:4])
+ dst.SecretKey = binary.BigEndian.Uint32(src[4:])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *BackendKeyData) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'K')
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src BackendKeyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "BackendKeyData",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go b/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go
new file mode 100644
index 0000000..f7bdb97
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+)
+
+type BigEndianBuf [8]byte
+
+func (b BigEndianBuf) Int16(n int16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, uint16(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint16(n uint16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int32(n int32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, uint32(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint32(n uint32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int64(n int64) []byte {
+ buf := b[0:8]
+ binary.BigEndian.PutUint64(buf, uint64(n))
+ return buf
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go b/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go
new file mode 100644
index 0000000..ad6ac48
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go
@@ -0,0 +1,223 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Bind struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters [][]byte
+ ResultFormatCodes []int16
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Bind) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Bind) Decode(src []byte) error {
+ *dst = Bind{}
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.DestinationPortal = string(src[:idx])
+ rp := idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.PreparedStatement = string(src[rp : rp+idx])
+ rp += idx + 1
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterFormatCodeCount > 0 {
+ dst.ParameterFormatCodes = make([]int16, parameterFormatCodeCount)
+
+ if len(src[rp:]) < len(dst.ParameterFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < parameterFormatCodeCount; i++ {
+ dst.ParameterFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterCount > 0 {
+ dst.Parameters = make([][]byte, parameterCount)
+
+ for i := 0; i < parameterCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if msgSize == -1 {
+ continue
+ }
+
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ dst.Parameters[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ resultFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.ResultFormatCodes = make([]int16, resultFormatCodeCount)
+ if len(src[rp:]) < len(dst.ResultFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < resultFormatCodeCount; i++ {
+ dst.ResultFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Bind) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'B')
+
+ dst = append(dst, src.DestinationPortal...)
+ dst = append(dst, 0)
+ dst = append(dst, src.PreparedStatement...)
+ dst = append(dst, 0)
+
+ if len(src.ParameterFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many parameter format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterFormatCodes)))
+ for _, fc := range src.ParameterFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ if len(src.Parameters) > math.MaxUint16 {
+ return nil, errors.New("too many parameters")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Parameters)))
+ for _, p := range src.Parameters {
+ if p == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(p)))
+ dst = append(dst, p...)
+ }
+
+ if len(src.ResultFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many result format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ResultFormatCodes)))
+ for _, fc := range src.ResultFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Bind) MarshalJSON() ([]byte, error) {
+ formattedParameters := make([]map[string]string, len(src.Parameters))
+ for i, p := range src.Parameters {
+ if p == nil {
+ continue
+ }
+
+ textFormat := true
+ if len(src.ParameterFormatCodes) == 1 {
+ textFormat = src.ParameterFormatCodes[0] == 0
+ } else if len(src.ParameterFormatCodes) > 1 {
+ textFormat = src.ParameterFormatCodes[i] == 0
+ }
+
+ if textFormat {
+ formattedParameters[i] = map[string]string{"text": string(p)}
+ } else {
+ formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(p)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }{
+ Type: "Bind",
+ DestinationPortal: src.DestinationPortal,
+ PreparedStatement: src.PreparedStatement,
+ ParameterFormatCodes: src.ParameterFormatCodes,
+ Parameters: formattedParameters,
+ ResultFormatCodes: src.ResultFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Bind) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }
+ err := json.Unmarshal(data, &msg)
+ if err != nil {
+ return err
+ }
+ dst.DestinationPortal = msg.DestinationPortal
+ dst.PreparedStatement = msg.PreparedStatement
+ dst.ParameterFormatCodes = msg.ParameterFormatCodes
+ dst.Parameters = make([][]byte, len(msg.Parameters))
+ dst.ResultFormatCodes = msg.ResultFormatCodes
+ for n, parameter := range msg.Parameters {
+ dst.Parameters[n], err = getValueFromJSON(parameter)
+ if err != nil {
+ return fmt.Errorf("cannot get param %d: %w", n, err)
+ }
+ }
+ return nil
+}
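
A small sketch of building the Bind message defined above; a nil entry in Parameters is encoded as SQL NULL (length -1), matching Encode.

package pgproto3example

import "github.com/jackc/pgx/v5/pgproto3"

// encodeBind appends a Bind for a previously parsed statement to buf. Both
// parameters use the text format (code 0); the second one is NULL.
func encodeBind(buf []byte) ([]byte, error) {
	bind := &pgproto3.Bind{
		PreparedStatement:    "stmt_1",
		ParameterFormatCodes: []int16{0, 0},
		Parameters:           [][]byte{[]byte("42"), nil},
	}
	return bind.Encode(buf)
}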
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go
new file mode 100644
index 0000000..bacf30d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type BindComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*BindComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *BindComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "BindComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *BindComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '2', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src BindComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "BindComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go
new file mode 100644
index 0000000..6b52dd9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const cancelRequestCode = 80877102
+
+type CancelRequest struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CancelRequest) Frontend() {}
+
+func (dst *CancelRequest) Decode(src []byte) error {
+ if len(src) != 12 {
+ return errors.New("bad cancel request size")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != cancelRequestCode {
+ return errors.New("bad cancel request code")
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[4:])
+ dst.SecretKey = binary.BigEndian.Uint32(src[8:])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *CancelRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 16)
+ dst = pgio.AppendInt32(dst, cancelRequestCode)
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CancelRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "CancelRequest",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go b/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go
new file mode 100644
index 0000000..fc0fa61
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go
@@ -0,0 +1,90 @@
+package pgproto3
+
+import (
+ "io"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+)
+
+// chunkReader is an io.Reader wrapper that minimizes IO reads and memory allocations. It allocates memory in chunks and
+// will read as much as will fit in the current buffer in a single call regardless of how large a read is actually
+// requested. The memory returned via Next is only valid until the next call to Next.
+//
+// This is roughly equivalent to a bufio.Reader that only uses Peek and Discard to never copy bytes.
+type chunkReader struct {
+ r io.Reader
+
+ buf *[]byte
+ rp, wp int // buf read position and write position
+
+ minBufSize int
+}
+
+// newChunkReader creates and returns a new chunkReader for r with default configuration. If minBufSize is <= 0 it uses
+// a default value.
+func newChunkReader(r io.Reader, minBufSize int) *chunkReader {
+ if minBufSize <= 0 {
+ // For historical reasons Postgres currently has an 8KB send buffer,
+ // so we want to have at least the same size buffer here.
+ // @see https://github.com/postgres/postgres/blob/249d64999615802752940e017ee5166e726bc7cd/src/backend/libpq/pqcomm.c#L134
+ // @see https://www.postgresql.org/message-id/0cdc5485-cb3c-5e16-4a46-e3b2f7a41322%40ya.ru
+ //
+ // In addition, testing has found no benefit of any larger buffer.
+ minBufSize = 8192
+ }
+
+ return &chunkReader{
+ r: r,
+ minBufSize: minBufSize,
+ buf: iobufpool.Get(minBufSize),
+ }
+}
+
+// Next returns buf filled with the next n bytes. buf is only valid until the next call to Next. If an error occurs, buf
+// will be nil.
+func (r *chunkReader) Next(n int) (buf []byte, err error) {
+ // Reset the buffer if it is empty
+ if r.rp == r.wp {
+ if len(*r.buf) != r.minBufSize {
+ iobufpool.Put(r.buf)
+ r.buf = iobufpool.Get(r.minBufSize)
+ }
+ r.rp = 0
+ r.wp = 0
+ }
+
+ // n bytes already in buf
+ if (r.wp - r.rp) >= n {
+ buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
+ r.rp += n
+ return buf, err
+ }
+
+ // buf is smaller than requested number of bytes
+ if len(*r.buf) < n {
+ bigBuf := iobufpool.Get(n)
+ r.wp = copy((*bigBuf), (*r.buf)[r.rp:r.wp])
+ r.rp = 0
+ iobufpool.Put(r.buf)
+ r.buf = bigBuf
+ }
+
+ // buf is large enough, but the filled area needs to be shifted to the start to make enough contiguous space
+ minReadCount := n - (r.wp - r.rp)
+ if (len(*r.buf) - r.wp) < minReadCount {
+ r.wp = copy((*r.buf), (*r.buf)[r.rp:r.wp])
+ r.rp = 0
+ }
+
+ // Read at least the required number of bytes from the underlying io.Reader
+ readBytesCount, err := io.ReadAtLeast(r.r, (*r.buf)[r.wp:], minReadCount)
+ r.wp += readBytesCount
+ if err != nil {
+ return nil, err
+ }
+
+ buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
+ r.rp += n
+ return buf, nil
+}
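
A package-internal sketch (written as a test, since chunkReader is unexported) of the Next contract documented above: each returned slice aliases the reader's buffer and is only valid until the following call.

package pgproto3

import (
	"bytes"
	"testing"
)

// TestChunkReaderSketch demonstrates that Next hands out windows into the same
// underlying buffer rather than copying.
func TestChunkReaderSketch(t *testing.T) {
	cr := newChunkReader(bytes.NewReader([]byte("hello world")), 0)

	head, err := cr.Next(5) // "hello", backed by cr's internal buffer
	if err != nil || string(head) != "hello" {
		t.Fatalf("unexpected result: %q, %v", head, err)
	}

	rest, err := cr.Next(6) // " world"; head should not be reused after this call
	if err != nil || string(rest) != " world" {
		t.Fatalf("unexpected result: %q, %v", rest, err)
	}
}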
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/close.go b/vendor/github.com/jackc/pgx/v5/pgproto3/close.go
new file mode 100644
index 0000000..0b50f27
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/close.go
@@ -0,0 +1,81 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+)
+
+type Close struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Close) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Close) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Close) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'C')
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Close) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Close",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Close) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ ObjectType string
+ Name string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.ObjectType) != 1 {
+ return errors.New("invalid length for Close.ObjectType")
+ }
+
+ dst.ObjectType = byte(msg.ObjectType[0])
+ dst.Name = msg.Name
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go
new file mode 100644
index 0000000..833f7a1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CloseComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CloseComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CloseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CloseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CloseComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '3', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CloseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CloseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go
new file mode 100644
index 0000000..eba7094
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go
@@ -0,0 +1,66 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type CommandComplete struct {
+ CommandTag []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CommandComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CommandComplete) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx == -1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete", details: "unterminated string"}
+ }
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete", details: "string terminated too early"}
+ }
+
+ dst.CommandTag = src[:idx]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CommandComplete) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'C')
+ dst = append(dst, src.CommandTag...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CommandComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ CommandTag string
+ }{
+ Type: "CommandComplete",
+ CommandTag: string(src.CommandTag),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CommandComplete) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ CommandTag string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.CommandTag = []byte(msg.CommandTag)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go
new file mode 100644
index 0000000..99e1afe
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go
@@ -0,0 +1,95 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyBothResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyBothResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyBothResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyBothResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyBothResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'W')
+ dst = append(dst, src.OverallFormat)
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyBothResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyBothResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyBothResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyBothResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go
new file mode 100644
index 0000000..89ecdd4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+)
+
+type CopyData struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyData) Backend() {}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyData) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyData) Decode(src []byte) error {
+ dst.Data = src
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyData) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'd')
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "CopyData",
+ Data: hex.EncodeToString(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyData) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go
new file mode 100644
index 0000000..040814d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go
@@ -0,0 +1,38 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CopyDone struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyDone) Backend() {}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyDone) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyDone) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CopyDone", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyDone) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'c', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyDone) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CopyDone",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go
new file mode 100644
index 0000000..72a85fd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type CopyFail struct {
+ Message string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyFail) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyFail) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CopyFail"}
+ }
+
+ dst.Message = string(src[:idx])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyFail) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'f')
+ dst = append(dst, src.Message...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyFail) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Message string
+ }{
+ Type: "CopyFail",
+ Message: src.Message,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go
new file mode 100644
index 0000000..06cf99c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go
@@ -0,0 +1,96 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyInResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyInResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyInResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyInResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyInResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'G')
+
+ dst = append(dst, src.OverallFormat)
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyInResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyInResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyInResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyInResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go
new file mode 100644
index 0000000..549e916
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go
@@ -0,0 +1,96 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyOutResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyOutResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyOutResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyOutResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyOutResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'H')
+
+ dst = append(dst, src.OverallFormat)
+
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyOutResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyOutResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyOutResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyOutResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go b/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go
new file mode 100644
index 0000000..fdfb0f7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go
@@ -0,0 +1,143 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type DataRow struct {
+ Values [][]byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*DataRow) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *DataRow) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+ rp := 0
+ fieldCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+	// If the capacity of the values slice is too small OR substantially too
+	// large, reallocate. This is to avoid one row with many columns
+	// permanently allocating memory.
+ if cap(dst.Values) < fieldCount || cap(dst.Values)-fieldCount > 32 {
+ newCap := 32
+ if newCap < fieldCount {
+ newCap = fieldCount
+ }
+ dst.Values = make([][]byte, fieldCount, newCap)
+ } else {
+ dst.Values = dst.Values[:fieldCount]
+ }
+
+ for i := 0; i < fieldCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if valueLen == -1 {
+ dst.Values[i] = nil
+ } else {
+ if len(src[rp:]) < valueLen || valueLen < 0 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ dst.Values[i] = src[rp : rp+valueLen : rp+valueLen]
+ rp += valueLen
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *DataRow) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'D')
+
+ if len(src.Values) > math.MaxUint16 {
+ return nil, errors.New("too many values")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Values)))
+ for _, v := range src.Values {
+ if v == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(v)))
+ dst = append(dst, v...)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src DataRow) MarshalJSON() ([]byte, error) {
+ formattedValues := make([]map[string]string, len(src.Values))
+ for i, v := range src.Values {
+ if v == nil {
+ continue
+ }
+
+ var hasNonPrintable bool
+ for _, b := range v {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValues[i] = map[string]string{"binary": hex.EncodeToString(v)}
+ } else {
+ formattedValues[i] = map[string]string{"text": string(v)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Values []map[string]string
+ }{
+ Type: "DataRow",
+ Values: formattedValues,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *DataRow) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Values []map[string]string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Values = make([][]byte, len(msg.Values))
+ for n, parameter := range msg.Values {
+ var err error
+ dst.Values[n], err = getValueFromJSON(parameter)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
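
A point worth illustrating from the Decode above: DataRow re-slices the buffer it is handed, so the byte slices in Values stay valid only as long as that buffer does. Below is a minimal sketch of decoding one hand-assembled row body and copying a value out; the literal bytes are purely illustrative.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgproto3"
)

func main() {
	// A hand-assembled DataRow body: a uint16 field count followed by
	// (int32 length, value bytes) per field; a length of -1 means SQL NULL.
	body := []byte{
		0x00, 0x02, // two fields
		0x00, 0x00, 0x00, 0x02, 'h', 'i', // field 0: "hi"
		0xff, 0xff, 0xff, 0xff, // field 1: NULL
	}

	var row pgproto3.DataRow
	if err := row.Decode(body); err != nil {
		panic(err)
	}

	// Values re-slices body, so copy anything that must outlive the buffer.
	kept := append([]byte(nil), row.Values[0]...)
	fmt.Printf("first=%q, second is NULL: %v\n", kept, row.Values[1] == nil)
}
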
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go b/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go
new file mode 100644
index 0000000..89feff2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go
@@ -0,0 +1,80 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+)
+
+type Describe struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Describe) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Describe) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Describe) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'D')
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Describe) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Describe",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Describe) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ ObjectType string
+ Name string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if len(msg.ObjectType) != 1 {
+ return errors.New("invalid length for Describe.ObjectType")
+ }
+
+ dst.ObjectType = byte(msg.ObjectType[0])
+ dst.Name = msg.Name
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go b/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go
new file mode 100644
index 0000000..0afd18e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go
@@ -0,0 +1,11 @@
+// Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
+//
+// The primary interfaces are Frontend and Backend. They correspond to a client and server respectively. Messages are
+// sent with Send (or a specialized Send variant). Messages are automatically buffered to minimize small writes. Call
+// Flush to ensure a message has actually been sent.
+//
+// The Trace method of Frontend and Backend can be used to examine the wire-level message traffic. It outputs in a
+// similar format to the PQtrace function in libpq.
+//
+// See https://www.postgresql.org/docs/current/protocol-message-formats.html for meanings of the different messages.
+package pgproto3
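
A minimal sketch of the Send/Flush/Receive cycle described in the package doc above, using the simple query protocol. It assumes an already-established and already-authenticated connection; the package name pgclient and the function name simpleQuery are illustrative only.

package pgclient

import (
	"fmt"
	"net"

	"github.com/jackc/pgx/v5/pgproto3"
)

// simpleQuery runs one simple-protocol query and drains the response cycle.
func simpleQuery(conn net.Conn, sql string) error {
	fe := pgproto3.NewFrontend(conn, conn)

	fe.Send(&pgproto3.Query{String: sql})
	if err := fe.Flush(); err != nil { // Send only buffers; Flush actually writes.
		return err
	}

	for {
		msg, err := fe.Receive()
		if err != nil {
			return err
		}
		switch m := msg.(type) {
		case *pgproto3.DataRow:
			fmt.Println("row with", len(m.Values), "columns")
		case *pgproto3.ErrorResponse:
			return fmt.Errorf("server error: %s", m.Message)
		case *pgproto3.ReadyForQuery:
			return nil // the query cycle is complete
		}
	}
}
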
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go
new file mode 100644
index 0000000..cb6cca0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type EmptyQueryResponse struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*EmptyQueryResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *EmptyQueryResponse) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "EmptyQueryResponse", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *EmptyQueryResponse) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'I', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src EmptyQueryResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "EmptyQueryResponse",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go
new file mode 100644
index 0000000..6ef9bd0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go
@@ -0,0 +1,326 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+)
+
+type ErrorResponse struct {
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ErrorResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ErrorResponse) Decode(src []byte) error {
+ *dst = ErrorResponse{}
+
+ buf := bytes.NewBuffer(src)
+
+ for {
+ k, err := buf.ReadByte()
+ if err != nil {
+ return err
+ }
+ if k == 0 {
+ break
+ }
+
+ vb, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ v := string(vb[:len(vb)-1])
+
+ switch k {
+ case 'S':
+ dst.Severity = v
+ case 'V':
+ dst.SeverityUnlocalized = v
+ case 'C':
+ dst.Code = v
+ case 'M':
+ dst.Message = v
+ case 'D':
+ dst.Detail = v
+ case 'H':
+ dst.Hint = v
+ case 'P':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Position = int32(n)
+ case 'p':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.InternalPosition = int32(n)
+ case 'q':
+ dst.InternalQuery = v
+ case 'W':
+ dst.Where = v
+ case 's':
+ dst.SchemaName = v
+ case 't':
+ dst.TableName = v
+ case 'c':
+ dst.ColumnName = v
+ case 'd':
+ dst.DataTypeName = v
+ case 'n':
+ dst.ConstraintName = v
+ case 'F':
+ dst.File = v
+ case 'L':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Line = int32(n)
+ case 'R':
+ dst.Routine = v
+
+ default:
+ if dst.UnknownFields == nil {
+ dst.UnknownFields = make(map[byte]string)
+ }
+ dst.UnknownFields[k] = v
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ErrorResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'E')
+ dst = src.appendFields(dst)
+ return finishMessage(dst, sp)
+}
+
+func (src *ErrorResponse) appendFields(dst []byte) []byte {
+ if src.Severity != "" {
+ dst = append(dst, 'S')
+ dst = append(dst, src.Severity...)
+ dst = append(dst, 0)
+ }
+ if src.SeverityUnlocalized != "" {
+ dst = append(dst, 'V')
+ dst = append(dst, src.SeverityUnlocalized...)
+ dst = append(dst, 0)
+ }
+ if src.Code != "" {
+ dst = append(dst, 'C')
+ dst = append(dst, src.Code...)
+ dst = append(dst, 0)
+ }
+ if src.Message != "" {
+ dst = append(dst, 'M')
+ dst = append(dst, src.Message...)
+ dst = append(dst, 0)
+ }
+ if src.Detail != "" {
+ dst = append(dst, 'D')
+ dst = append(dst, src.Detail...)
+ dst = append(dst, 0)
+ }
+ if src.Hint != "" {
+ dst = append(dst, 'H')
+ dst = append(dst, src.Hint...)
+ dst = append(dst, 0)
+ }
+ if src.Position != 0 {
+ dst = append(dst, 'P')
+ dst = append(dst, strconv.Itoa(int(src.Position))...)
+ dst = append(dst, 0)
+ }
+ if src.InternalPosition != 0 {
+ dst = append(dst, 'p')
+ dst = append(dst, strconv.Itoa(int(src.InternalPosition))...)
+ dst = append(dst, 0)
+ }
+ if src.InternalQuery != "" {
+ dst = append(dst, 'q')
+ dst = append(dst, src.InternalQuery...)
+ dst = append(dst, 0)
+ }
+ if src.Where != "" {
+ dst = append(dst, 'W')
+ dst = append(dst, src.Where...)
+ dst = append(dst, 0)
+ }
+ if src.SchemaName != "" {
+ dst = append(dst, 's')
+ dst = append(dst, src.SchemaName...)
+ dst = append(dst, 0)
+ }
+ if src.TableName != "" {
+ dst = append(dst, 't')
+ dst = append(dst, src.TableName...)
+ dst = append(dst, 0)
+ }
+ if src.ColumnName != "" {
+ dst = append(dst, 'c')
+ dst = append(dst, src.ColumnName...)
+ dst = append(dst, 0)
+ }
+ if src.DataTypeName != "" {
+ dst = append(dst, 'd')
+ dst = append(dst, src.DataTypeName...)
+ dst = append(dst, 0)
+ }
+ if src.ConstraintName != "" {
+ dst = append(dst, 'n')
+ dst = append(dst, src.ConstraintName...)
+ dst = append(dst, 0)
+ }
+ if src.File != "" {
+ dst = append(dst, 'F')
+ dst = append(dst, src.File...)
+ dst = append(dst, 0)
+ }
+ if src.Line != 0 {
+ dst = append(dst, 'L')
+ dst = append(dst, strconv.Itoa(int(src.Line))...)
+ dst = append(dst, 0)
+ }
+ if src.Routine != "" {
+ dst = append(dst, 'R')
+ dst = append(dst, src.Routine...)
+ dst = append(dst, 0)
+ }
+
+ for k, v := range src.UnknownFields {
+ dst = append(dst, k)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+
+ dst = append(dst, 0)
+
+ return dst
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ErrorResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+ }{
+ Type: "ErrorResponse",
+ Severity: src.Severity,
+ SeverityUnlocalized: src.SeverityUnlocalized,
+ Code: src.Code,
+ Message: src.Message,
+ Detail: src.Detail,
+ Hint: src.Hint,
+ Position: src.Position,
+ InternalPosition: src.InternalPosition,
+ InternalQuery: src.InternalQuery,
+ Where: src.Where,
+ SchemaName: src.SchemaName,
+ TableName: src.TableName,
+ ColumnName: src.ColumnName,
+ DataTypeName: src.DataTypeName,
+ ConstraintName: src.ConstraintName,
+ File: src.File,
+ Line: src.Line,
+ Routine: src.Routine,
+ UnknownFields: src.UnknownFields,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *ErrorResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Severity = msg.Severity
+ dst.SeverityUnlocalized = msg.SeverityUnlocalized
+ dst.Code = msg.Code
+ dst.Message = msg.Message
+ dst.Detail = msg.Detail
+ dst.Hint = msg.Hint
+ dst.Position = msg.Position
+ dst.InternalPosition = msg.InternalPosition
+ dst.InternalQuery = msg.InternalQuery
+ dst.Where = msg.Where
+ dst.SchemaName = msg.SchemaName
+ dst.TableName = msg.TableName
+ dst.ColumnName = msg.ColumnName
+ dst.DataTypeName = msg.DataTypeName
+ dst.ConstraintName = msg.ConstraintName
+ dst.File = msg.File
+ dst.Line = msg.Line
+ dst.Routine = msg.Routine
+
+ dst.UnknownFields = msg.UnknownFields
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go b/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go
new file mode 100644
index 0000000..31bc714
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Execute struct {
+ Portal string
+ MaxRows uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Execute) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Execute) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Portal = string(b[:len(b)-1])
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Execute"}
+ }
+ dst.MaxRows = binary.BigEndian.Uint32(buf.Next(4))
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Execute) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'E')
+ dst = append(dst, src.Portal...)
+ dst = append(dst, 0)
+ dst = pgio.AppendUint32(dst, src.MaxRows)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Execute) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Portal string
+ MaxRows uint32
+ }{
+ Type: "Execute",
+ Portal: src.Portal,
+ MaxRows: src.MaxRows,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go b/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go
new file mode 100644
index 0000000..e5dc1fb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Flush struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Flush) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Flush) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Flush", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Flush) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'H', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Flush) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Flush",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go b/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go
new file mode 100644
index 0000000..056e547
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go
@@ -0,0 +1,468 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Frontend acts as a client for the PostgreSQL wire protocol version 3.
+type Frontend struct {
+ cr *chunkReader
+ w io.Writer
+
+ // tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
+ // before it is actually transmitted (i.e. before Flush). It is safe to change this variable when the Frontend is
+ // idle. Setting and unsetting tracer provides equivalent functionality to PQtrace and PQuntrace in libpq.
+ tracer *tracer
+
+ wbuf []byte
+ encodeError error
+
+ // Backend message flyweights
+ authenticationOk AuthenticationOk
+ authenticationCleartextPassword AuthenticationCleartextPassword
+ authenticationMD5Password AuthenticationMD5Password
+ authenticationGSS AuthenticationGSS
+ authenticationGSSContinue AuthenticationGSSContinue
+ authenticationSASL AuthenticationSASL
+ authenticationSASLContinue AuthenticationSASLContinue
+ authenticationSASLFinal AuthenticationSASLFinal
+ backendKeyData BackendKeyData
+ bindComplete BindComplete
+ closeComplete CloseComplete
+ commandComplete CommandComplete
+ copyBothResponse CopyBothResponse
+ copyData CopyData
+ copyInResponse CopyInResponse
+ copyOutResponse CopyOutResponse
+ copyDone CopyDone
+ dataRow DataRow
+ emptyQueryResponse EmptyQueryResponse
+ errorResponse ErrorResponse
+ functionCallResponse FunctionCallResponse
+ noData NoData
+ noticeResponse NoticeResponse
+ notificationResponse NotificationResponse
+ parameterDescription ParameterDescription
+ parameterStatus ParameterStatus
+ parseComplete ParseComplete
+ readyForQuery ReadyForQuery
+ rowDescription RowDescription
+ portalSuspended PortalSuspended
+
+ bodyLen int
+ maxBodyLen int // maxBodyLen is the maximum length of a message body in octets. If a message body exceeds this length, Receive will return an error.
+ msgType byte
+ partialMsg bool
+ authType uint32
+}
+
+// NewFrontend creates a new Frontend.
+func NewFrontend(r io.Reader, w io.Writer) *Frontend {
+ cr := newChunkReader(r, 0)
+ return &Frontend{cr: cr, w: w}
+}
+
+// Send sends a message to the backend (i.e. the server). The message is buffered until Flush is called. Any error
+// encountered will be returned from Flush.
+//
+// Send can work with any FrontendMessage. Some commonly used message types such as Bind have specialized send methods
+// such as SendBind. These methods should be preferred when the type of message is known up front (e.g. when building an
+// extended query protocol query) as they may be faster due to knowing the type of msg rather than it being hidden
+// behind an interface.
+func (f *Frontend) Send(msg FrontendMessage) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceMessage('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// Flush writes any pending messages to the backend (i.e. the server).
+func (f *Frontend) Flush() error {
+ if err := f.encodeError; err != nil {
+ f.encodeError = nil
+ f.wbuf = f.wbuf[:0]
+ return &writeError{err: err, safeToRetry: true}
+ }
+
+ if len(f.wbuf) == 0 {
+ return nil
+ }
+
+ n, err := f.w.Write(f.wbuf)
+
+ const maxLen = 1024
+ if len(f.wbuf) > maxLen {
+ f.wbuf = make([]byte, 0, maxLen)
+ } else {
+ f.wbuf = f.wbuf[:0]
+ }
+
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ return nil
+}
+
+// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
+// PQtrace.
+func (f *Frontend) Trace(w io.Writer, options TracerOptions) {
+ f.tracer = &tracer{
+ w: w,
+ buf: &bytes.Buffer{},
+ TracerOptions: options,
+ }
+}
+
+// Untrace stops tracing.
+func (f *Frontend) Untrace() {
+ f.tracer = nil
+}
+
+// SendBind sends a Bind message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendBind(msg *Bind) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceBind('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendParse sends a Parse message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendParse(msg *Parse) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceParse('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendClose sends a Close message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendClose(msg *Close) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceClose('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendDescribe sends a Describe message to the backend (i.e. the server). The message is buffered until Flush is
+// called. Any error encountered will be returned from Flush.
+func (f *Frontend) SendDescribe(msg *Describe) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceDescribe('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendExecute sends an Execute message to the backend (i.e. the server). The message is buffered until Flush is called.
+// Any error encountered will be returned from Flush.
+func (f *Frontend) SendExecute(msg *Execute) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.TraceQueryute('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendSync sends a Sync message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendSync(msg *Sync) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceSync('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendQuery sends a Query message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendQuery(msg *Query) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceQuery('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendUnbufferedEncodedCopyData immediately sends an encoded CopyData message to the backend (i.e. the server). This method
+// is more efficient than sending a CopyData message with Send as the message data is not copied to the internal buffer
+// before being written out. The internal buffer is flushed before the message is sent.
+func (f *Frontend) SendUnbufferedEncodedCopyData(msg []byte) error {
+ err := f.Flush()
+ if err != nil {
+ return err
+ }
+
+ n, err := f.w.Write(msg)
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ if f.tracer != nil {
+ f.tracer.traceCopyData('F', int32(len(msg)-1), &CopyData{})
+ }
+
+ return nil
+}
+
+func translateEOFtoErrUnexpectedEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+// Receive receives a message from the backend. The returned message is only valid until the next call to Receive.
+func (f *Frontend) Receive() (BackendMessage, error) {
+ if !f.partialMsg {
+ header, err := f.cr.Next(5)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ f.msgType = header[0]
+
+ msgLength := int(binary.BigEndian.Uint32(header[1:]))
+ if msgLength < 4 {
+ return nil, fmt.Errorf("invalid message length: %d", msgLength)
+ }
+
+ f.bodyLen = msgLength - 4
+ if f.maxBodyLen > 0 && f.bodyLen > f.maxBodyLen {
+ return nil, &ExceededMaxBodyLenErr{f.maxBodyLen, f.bodyLen}
+ }
+ f.partialMsg = true
+ }
+
+ msgBody, err := f.cr.Next(f.bodyLen)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ f.partialMsg = false
+
+ var msg BackendMessage
+ switch f.msgType {
+ case '1':
+ msg = &f.parseComplete
+ case '2':
+ msg = &f.bindComplete
+ case '3':
+ msg = &f.closeComplete
+ case 'A':
+ msg = &f.notificationResponse
+ case 'c':
+ msg = &f.copyDone
+ case 'C':
+ msg = &f.commandComplete
+ case 'd':
+ msg = &f.copyData
+ case 'D':
+ msg = &f.dataRow
+ case 'E':
+ msg = &f.errorResponse
+ case 'G':
+ msg = &f.copyInResponse
+ case 'H':
+ msg = &f.copyOutResponse
+ case 'I':
+ msg = &f.emptyQueryResponse
+ case 'K':
+ msg = &f.backendKeyData
+ case 'n':
+ msg = &f.noData
+ case 'N':
+ msg = &f.noticeResponse
+ case 'R':
+ var err error
+ msg, err = f.findAuthenticationMessageType(msgBody)
+ if err != nil {
+ return nil, err
+ }
+ case 's':
+ msg = &f.portalSuspended
+ case 'S':
+ msg = &f.parameterStatus
+ case 't':
+ msg = &f.parameterDescription
+ case 'T':
+ msg = &f.rowDescription
+ case 'V':
+ msg = &f.functionCallResponse
+ case 'W':
+ msg = &f.copyBothResponse
+ case 'Z':
+ msg = &f.readyForQuery
+ default:
+ return nil, fmt.Errorf("unknown message type: %c", f.msgType)
+ }
+
+ err = msg.Decode(msgBody)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.tracer != nil {
+ f.tracer.traceMessage('B', int32(5+len(msgBody)), msg)
+ }
+
+ return msg, nil
+}
+
+// Authentication message type constants.
+// See src/include/libpq/pqcomm.h for all
+// constants.
+const (
+ AuthTypeOk = 0
+ AuthTypeCleartextPassword = 3
+ AuthTypeMD5Password = 5
+ AuthTypeSCMCreds = 6
+ AuthTypeGSS = 7
+ AuthTypeGSSCont = 8
+ AuthTypeSSPI = 9
+ AuthTypeSASL = 10
+ AuthTypeSASLContinue = 11
+ AuthTypeSASLFinal = 12
+)
+
+func (f *Frontend) findAuthenticationMessageType(src []byte) (BackendMessage, error) {
+ if len(src) < 4 {
+ return nil, errors.New("authentication message too short")
+ }
+ f.authType = binary.BigEndian.Uint32(src[:4])
+
+ switch f.authType {
+ case AuthTypeOk:
+ return &f.authenticationOk, nil
+ case AuthTypeCleartextPassword:
+ return &f.authenticationCleartextPassword, nil
+ case AuthTypeMD5Password:
+ return &f.authenticationMD5Password, nil
+ case AuthTypeSCMCreds:
+ return nil, errors.New("AuthTypeSCMCreds is unimplemented")
+ case AuthTypeGSS:
+ return &f.authenticationGSS, nil
+ case AuthTypeGSSCont:
+ return &f.authenticationGSSContinue, nil
+ case AuthTypeSSPI:
+ return nil, errors.New("AuthTypeSSPI is unimplemented")
+ case AuthTypeSASL:
+ return &f.authenticationSASL, nil
+ case AuthTypeSASLContinue:
+ return &f.authenticationSASLContinue, nil
+ case AuthTypeSASLFinal:
+ return &f.authenticationSASLFinal, nil
+ default:
+ return nil, fmt.Errorf("unknown authentication type: %d", f.authType)
+ }
+}
+
+// GetAuthType returns the authType used in the current state of the frontend.
+// See SetAuthType for more information.
+func (f *Frontend) GetAuthType() uint32 {
+ return f.authType
+}
+
+func (f *Frontend) ReadBufferLen() int {
+ return f.cr.wp - f.cr.rp
+}
+
+// SetMaxBodyLen sets the maximum length of a message body in octets.
+// If a message body exceeds this length, Receive will return an error.
+// This is useful for protecting against a corrupted server that sends
+// messages with incorrect length, which can cause memory exhaustion.
+// The default value is 0.
+// If maxBodyLen is 0, then no maximum is enforced.
+func (f *Frontend) SetMaxBodyLen(maxBodyLen int) {
+ f.maxBodyLen = maxBodyLen
+}
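
A short sketch of combining SetMaxBodyLen and Trace when constructing a Frontend, as described above. The 64 MiB limit and the helper name newTracedFrontend are arbitrary choices, and TracerOptions is left at its zero value.

package pgclient

import (
	"net"
	"os"

	"github.com/jackc/pgx/v5/pgproto3"
)

// newTracedFrontend wraps conn in a Frontend that rejects oversized message
// bodies and logs libpq-style trace output to stderr.
func newTracedFrontend(conn net.Conn) *pgproto3.Frontend {
	fe := pgproto3.NewFrontend(conn, conn)

	// Receive returns an ExceededMaxBodyLenErr instead of allocating
	// unbounded memory if a message body claims to be larger than this.
	fe.SetMaxBodyLen(64 * 1024 * 1024)

	// Emit libpq-style trace output until Untrace is called.
	fe.Trace(os.Stderr, pgproto3.TracerOptions{})

	return fe
}
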
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go
new file mode 100644
index 0000000..7d83579
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go
@@ -0,0 +1,102 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type FunctionCall struct {
+ Function uint32
+ ArgFormatCodes []uint16
+ Arguments [][]byte
+ ResultFormatCode uint16
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*FunctionCall) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *FunctionCall) Decode(src []byte) error {
+ *dst = FunctionCall{}
+ rp := 0
+ // Specifies the object ID of the function to call.
+ dst.Function = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ // The number of argument format codes that follow (denoted C below).
+ // This can be zero to indicate that there are no arguments or that the arguments all use the default format (text);
+ // or one, in which case the specified format code is applied to all arguments;
+ // or it can equal the actual number of arguments.
+ nArgumentCodes := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ argumentCodes := make([]uint16, nArgumentCodes)
+ for i := 0; i < nArgumentCodes; i++ {
+ // The argument format codes. Each must presently be zero (text) or one (binary).
+ ac := binary.BigEndian.Uint16(src[rp:])
+ if ac != 0 && ac != 1 {
+ return &invalidMessageFormatErr{messageType: "FunctionCall"}
+ }
+ argumentCodes[i] = ac
+ rp += 2
+ }
+ dst.ArgFormatCodes = argumentCodes
+
+ // Specifies the number of arguments being supplied to the function.
+ nArguments := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ arguments := make([][]byte, nArguments)
+ for i := 0; i < nArguments; i++ {
+ // The length of the argument value, in bytes (this count does not include itself). Can be zero.
+ // As a special case, -1 indicates a NULL argument value. No value bytes follow in the NULL case.
+		argumentLength := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ if argumentLength == -1 {
+ arguments[i] = nil
+ } else {
+ // The value of the argument, in the format indicated by the associated format code. n is the above length.
+ argumentValue := src[rp : rp+argumentLength]
+ rp += argumentLength
+ arguments[i] = argumentValue
+ }
+ }
+ dst.Arguments = arguments
+ // The format code for the function result. Must presently be zero (text) or one (binary).
+ resultFormatCode := binary.BigEndian.Uint16(src[rp:])
+ if resultFormatCode != 0 && resultFormatCode != 1 {
+ return &invalidMessageFormatErr{messageType: "FunctionCall"}
+ }
+ dst.ResultFormatCode = resultFormatCode
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *FunctionCall) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'F')
+ dst = pgio.AppendUint32(dst, src.Function)
+
+ if len(src.ArgFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many arg format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ArgFormatCodes)))
+ for _, argFormatCode := range src.ArgFormatCodes {
+ dst = pgio.AppendUint16(dst, argFormatCode)
+ }
+
+ if len(src.Arguments) > math.MaxUint16 {
+ return nil, errors.New("too many arguments")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Arguments)))
+ for _, argument := range src.Arguments {
+ if argument == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(argument)))
+ dst = append(dst, argument...)
+ }
+ }
+ dst = pgio.AppendUint16(dst, src.ResultFormatCode)
+ return finishMessage(dst, sp)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go
new file mode 100644
index 0000000..1f27349
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go
@@ -0,0 +1,97 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type FunctionCallResponse struct {
+ Result []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*FunctionCallResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *FunctionCallResponse) Decode(src []byte) error {
+ if len(src) < 4 {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+ rp := 0
+	resultSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ if resultSize == -1 {
+ dst.Result = nil
+ return nil
+ }
+
+ if len(src[rp:]) != resultSize {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+
+ dst.Result = src[rp:]
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *FunctionCallResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'V')
+
+ if src.Result == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(src.Result)))
+ dst = append(dst, src.Result...)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src FunctionCallResponse) MarshalJSON() ([]byte, error) {
+ var formattedValue map[string]string
+ var hasNonPrintable bool
+ for _, b := range src.Result {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValue = map[string]string{"binary": hex.EncodeToString(src.Result)}
+ } else {
+ formattedValue = map[string]string{"text": string(src.Result)}
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Result map[string]string
+ }{
+ Type: "FunctionCallResponse",
+ Result: formattedValue,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *FunctionCallResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Result map[string]string
+ }
+ err := json.Unmarshal(data, &msg)
+ if err != nil {
+ return err
+ }
+ dst.Result, err = getValueFromJSON(msg.Result)
+ return err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go
new file mode 100644
index 0000000..70cb20c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const gssEncReqNumber = 80877104
+
+type GSSEncRequest struct {
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*GSSEncRequest) Frontend() {}
+
+func (dst *GSSEncRequest) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("gss encoding request too short")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != gssEncReqNumber {
+ return errors.New("bad gss encoding request code")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *GSSEncRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 8)
+ dst = pgio.AppendInt32(dst, gssEncReqNumber)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src GSSEncRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "GSSEncRequest",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go
new file mode 100644
index 0000000..10d9377
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type GSSResponse struct {
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (g *GSSResponse) Frontend() {}
+
+func (g *GSSResponse) Decode(data []byte) error {
+ g.Data = data
+ return nil
+}
+
+func (g *GSSResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, g.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (g *GSSResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "GSSResponse",
+ Data: g.Data,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (g *GSSResponse) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Data []byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ g.Data = msg.Data
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go
new file mode 100644
index 0000000..cbcaad4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type NoData struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NoData) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NoData) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "NoData", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NoData) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'n', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src NoData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "NoData",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go
new file mode 100644
index 0000000..497aba6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go
@@ -0,0 +1,19 @@
+package pgproto3
+
+type NoticeResponse ErrorResponse
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NoticeResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NoticeResponse) Decode(src []byte) error {
+ return (*ErrorResponse)(dst).Decode(src)
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NoticeResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'N')
+ dst = (*ErrorResponse)(src).appendFields(dst)
+ return finishMessage(dst, sp)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go
new file mode 100644
index 0000000..243b6bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go
@@ -0,0 +1,71 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type NotificationResponse struct {
+ PID uint32
+ Channel string
+ Payload string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NotificationResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NotificationResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "NotificationResponse", details: "too short"}
+ }
+
+ pid := binary.BigEndian.Uint32(buf.Next(4))
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ channel := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ payload := string(b[:len(b)-1])
+
+ *dst = NotificationResponse{PID: pid, Channel: channel, Payload: payload}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NotificationResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'A')
+ dst = pgio.AppendUint32(dst, src.PID)
+ dst = append(dst, src.Channel...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Payload...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src NotificationResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ PID uint32
+ Channel string
+ Payload string
+ }{
+ Type: "NotificationResponse",
+ PID: src.PID,
+ Channel: src.Channel,
+ Payload: src.Payload,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go
new file mode 100644
index 0000000..1ef27b7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type ParameterDescription struct {
+ ParameterOIDs []uint32
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParameterDescription) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParameterDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "ParameterDescription"}
+ }
+
+ // Reported parameter count will be incorrect when number of args is greater than uint16
+ buf.Next(2)
+ // Instead infer parameter count by remaining size of message
+ parameterCount := buf.Len() / 4
+
+ *dst = ParameterDescription{ParameterOIDs: make([]uint32, parameterCount)}
+
+ for i := 0; i < parameterCount; i++ {
+ dst.ParameterOIDs[i] = binary.BigEndian.Uint32(buf.Next(4))
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParameterDescription) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 't')
+
+ if len(src.ParameterOIDs) > math.MaxUint16 {
+ return nil, errors.New("too many parameter oids")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ParameterDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ParameterOIDs []uint32
+ }{
+ Type: "ParameterDescription",
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go
new file mode 100644
index 0000000..9ee0720
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type ParameterStatus struct {
+ Name string
+ Value string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParameterStatus) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParameterStatus) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ name := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ value := string(b[:len(b)-1])
+
+ *dst = ParameterStatus{Name: name, Value: value}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParameterStatus) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'S')
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Value...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (ps ParameterStatus) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Value string
+ }{
+ Type: "ParameterStatus",
+ Name: ps.Name,
+ Value: ps.Value,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go
new file mode 100644
index 0000000..6ba3486
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go
@@ -0,0 +1,89 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Parse struct {
+ Name string
+ Query string
+ ParameterOIDs []uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Parse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Parse) Decode(src []byte) error {
+ *dst = Parse{}
+
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Name = string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Query = string(b[:len(b)-1])
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ parameterOIDCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ for i := 0; i < parameterOIDCount; i++ {
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ dst.ParameterOIDs = append(dst.ParameterOIDs, binary.BigEndian.Uint32(buf.Next(4)))
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Parse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'P')
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Query...)
+ dst = append(dst, 0)
+
+ if len(src.ParameterOIDs) > math.MaxUint16 {
+ return nil, errors.New("too many parameter oids")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Parse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Query string
+ ParameterOIDs []uint32
+ }{
+ Type: "Parse",
+ Name: src.Name,
+ Query: src.Query,
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
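
A sketch of the extended-protocol counterpart to the simple query sketch earlier, using the specialized Send* helpers. Leaving Parse.Name, Bind, and Execute at their zero values targets the unnamed prepared statement and unnamed portal; the package and function names are again illustrative.

package pgclient

import (
	"fmt"
	"net"

	"github.com/jackc/pgx/v5/pgproto3"
)

func extendedQuery(conn net.Conn, sql string) error {
	fe := pgproto3.NewFrontend(conn, conn)

	fe.SendParse(&pgproto3.Parse{Query: sql}) // unnamed prepared statement
	fe.SendBind(&pgproto3.Bind{})             // unnamed portal, no parameters
	fe.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
	fe.SendExecute(&pgproto3.Execute{}) // MaxRows 0 = no row limit
	fe.SendSync(&pgproto3.Sync{})
	if err := fe.Flush(); err != nil { // deferred encode errors also surface here
		return err
	}

	for {
		msg, err := fe.Receive()
		if err != nil {
			return err
		}
		switch m := msg.(type) {
		case *pgproto3.ErrorResponse:
			return fmt.Errorf("server error: %s", m.Message)
		case *pgproto3.ReadyForQuery:
			return nil
		}
	}
}
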
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go
new file mode 100644
index 0000000..cff9e27
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ParseComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParseComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "ParseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParseComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '1', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ParseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "ParseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go b/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go
new file mode 100644
index 0000000..67b7851
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type PasswordMessage struct {
+ Password string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*PasswordMessage) Frontend() {}
+
+// InitialResponse identifies this message as an authentication response.
+func (*PasswordMessage) InitialResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *PasswordMessage) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Password = string(b[:len(b)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *PasswordMessage) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, src.Password...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src PasswordMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Password string
+ }{
+ Type: "PasswordMessage",
+ Password: src.Password,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go
new file mode 100644
index 0000000..128f97f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go
@@ -0,0 +1,120 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// maxMessageBodyLen is the maximum length of a message body in bytes. See PG_LARGE_MESSAGE_LIMIT in the PostgreSQL
+// source. It is defined as (MaxAllocSize - 1). MaxAllocSize is defined as 0x3fffffff.
+const maxMessageBodyLen = (0x3fffffff - 1)
+
+// Message is the interface implemented by an object that can decode and encode
+// a particular PostgreSQL message.
+type Message interface {
+ // Decode is allowed and expected to retain a reference to data after
+ // returning (unlike encoding.BinaryUnmarshaler).
+ Decode(data []byte) error
+
+ // Encode appends itself to dst and returns the new buffer.
+ Encode(dst []byte) ([]byte, error)
+}
+
+// FrontendMessage is a message sent by the frontend (i.e. the client).
+type FrontendMessage interface {
+ Message
+ Frontend() // no-op method to distinguish frontend from backend methods
+}
+
+// BackendMessage is a message sent by the backend (i.e. the server).
+type BackendMessage interface {
+ Message
+ Backend() // no-op method to distinguish frontend from backend methods
+}
+
+type AuthenticationResponseMessage interface {
+ BackendMessage
+ AuthenticationResponse() // no-op method to distinguish authentication responses
+}
+
+type invalidMessageLenErr struct {
+ messageType string
+ expectedLen int
+ actualLen int
+}
+
+func (e *invalidMessageLenErr) Error() string {
+ return fmt.Sprintf("%s body must have length of %d, but it is %d", e.messageType, e.expectedLen, e.actualLen)
+}
+
+type invalidMessageFormatErr struct {
+ messageType string
+ details string
+}
+
+func (e *invalidMessageFormatErr) Error() string {
+ return fmt.Sprintf("%s body is invalid %s", e.messageType, e.details)
+}
+
+type writeError struct {
+ err error
+ safeToRetry bool
+}
+
+func (e *writeError) Error() string {
+ return fmt.Sprintf("write failed: %s", e.err.Error())
+}
+
+func (e *writeError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *writeError) Unwrap() error {
+ return e.err
+}
+
+type ExceededMaxBodyLenErr struct {
+ MaxExpectedBodyLen int
+ ActualBodyLen int
+}
+
+func (e *ExceededMaxBodyLenErr) Error() string {
+ return fmt.Sprintf("invalid body length: expected at most %d, but got %d", e.MaxExpectedBodyLen, e.ActualBodyLen)
+}
+
+// getValueFromJSON gets the value from a protocol message representation in JSON.
+func getValueFromJSON(v map[string]string) ([]byte, error) {
+ if v == nil {
+ return nil, nil
+ }
+ if text, ok := v["text"]; ok {
+ return []byte(text), nil
+ }
+ if binary, ok := v["binary"]; ok {
+ return hex.DecodeString(binary)
+ }
+ return nil, errors.New("unknown protocol representation")
+}
+
+// beginMessage begins a new message of type t. It appends the message type and a placeholder for the message length to
+// dst. It returns the new buffer and the position of the message length placeholder.
+func beginMessage(dst []byte, t byte) ([]byte, int) {
+ dst = append(dst, t)
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+ return dst, sp
+}
+
+// finishMessage finishes a message that was started with beginMessage. It computes the message length and writes it to
+// dst[sp]. If the message length is too large it returns an error. Otherwise it returns the final message buffer.
+func finishMessage(dst []byte, sp int) ([]byte, error) {
+ messageBodyLen := len(dst[sp:])
+ if messageBodyLen > maxMessageBodyLen {
+ return nil, errors.New("message body too large")
+ }
+ pgio.SetInt32(dst[sp:], int32(messageBodyLen))
+ return dst, nil
+}
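The beginMessage/finishMessage pair above implements the standard wire framing: a 1-byte type identifier followed by a 4-byte big-endian length that counts itself plus the body. A minimal sketch of what that looks like from the caller's side, using the PasswordMessage type defined earlier (illustrative only, not part of the vendored sources):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgproto3"
)

func main() {
	// Encode appends the type byte 'p', a 4-byte length, the password, and a NUL terminator.
	buf, err := (&pgproto3.PasswordMessage{Password: "secret"}).Encode(nil)
	if err != nil {
		panic(err)
	}
	// The length field covers itself plus the body: 4 + len("secret") + 1 = 11,
	// so the full wire message is 12 bytes.
	fmt.Printf("type=%c total=%d\n", buf[0], len(buf))
}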
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go b/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go
new file mode 100644
index 0000000..9e2f8cb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type PortalSuspended struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*PortalSuspended) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *PortalSuspended) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "PortalSuspended", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *PortalSuspended) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 's', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src PortalSuspended) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "PortalSuspended",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/query.go b/vendor/github.com/jackc/pgx/v5/pgproto3/query.go
new file mode 100644
index 0000000..aebdfde
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/query.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type Query struct {
+ String string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Query) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Query) Decode(src []byte) error {
+ i := bytes.IndexByte(src, 0)
+ if i != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "Query"}
+ }
+
+ dst.String = string(src[:i])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Query) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'Q')
+ dst = append(dst, src.String...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Query) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ String string
+ }{
+ Type: "Query",
+ String: src.String,
+ })
+}
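Encode and Decode are asymmetric by design: Encode emits the fully framed message, while Decode expects only the body. A small round-trip sketch (assumes nothing beyond the types above):

q := &pgproto3.Query{String: "select 1"}
wire, err := q.Encode(nil) // 'Q' + 4-byte length + "select 1" + NUL
if err != nil {
	panic(err)
}

var decoded pgproto3.Query
// Decode receives only the body, i.e. everything after the 1-byte type and 4-byte length.
if err := decoded.Decode(wire[5:]); err != nil {
	panic(err)
}
// decoded.String == "select 1"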
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go b/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go
new file mode 100644
index 0000000..a56af9f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+type ReadyForQuery struct {
+ TxStatus byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ReadyForQuery) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ReadyForQuery) Decode(src []byte) error {
+ if len(src) != 1 {
+ return &invalidMessageLenErr{messageType: "ReadyForQuery", expectedLen: 1, actualLen: len(src)}
+ }
+
+ dst.TxStatus = src[0]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ReadyForQuery) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'Z', 0, 0, 0, 5, src.TxStatus), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ReadyForQuery) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ TxStatus string
+ }{
+ Type: "ReadyForQuery",
+ TxStatus: string(src.TxStatus),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *ReadyForQuery) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ TxStatus string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if len(msg.TxStatus) != 1 {
+ return errors.New("invalid length for ReadyForQuery.TxStatus")
+ }
+ dst.TxStatus = msg.TxStatus[0]
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go b/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go
new file mode 100644
index 0000000..dc2a4dd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go
@@ -0,0 +1,166 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const (
+ TextFormat = 0
+ BinaryFormat = 1
+)
+
+type FieldDescription struct {
+ Name []byte
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (fd FieldDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+ }{
+ Name: string(fd.Name),
+ TableOID: fd.TableOID,
+ TableAttributeNumber: fd.TableAttributeNumber,
+ DataTypeOID: fd.DataTypeOID,
+ DataTypeSize: fd.DataTypeSize,
+ TypeModifier: fd.TypeModifier,
+ Format: fd.Format,
+ })
+}
+
+type RowDescription struct {
+ Fields []FieldDescription
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*RowDescription) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *RowDescription) Decode(src []byte) error {
+
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fieldCount := int(binary.BigEndian.Uint16(src))
+ rp := 2
+
+ dst.Fields = dst.Fields[0:0]
+
+ for i := 0; i < fieldCount; i++ {
+ var fd FieldDescription
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fd.Name = src[rp : rp+idx]
+ rp += idx + 1
+
+ // Check ahead of time that enough bytes remain for the fixed-size portion of the field description,
+ // since the direct slice reads below would panic on a short buffer.
+ if len(src[rp:]) < 18 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+
+ fd.TableOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ fd.TableAttributeNumber = binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ fd.DataTypeOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ fd.DataTypeSize = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ fd.TypeModifier = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ fd.Format = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.Fields = append(dst.Fields, fd)
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *RowDescription) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'T')
+
+ if len(src.Fields) > math.MaxUint16 {
+ return nil, errors.New("too many fields")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Fields)))
+ for _, fd := range src.Fields {
+ dst = append(dst, fd.Name...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, fd.TableOID)
+ dst = pgio.AppendUint16(dst, fd.TableAttributeNumber)
+ dst = pgio.AppendUint32(dst, fd.DataTypeOID)
+ dst = pgio.AppendInt16(dst, fd.DataTypeSize)
+ dst = pgio.AppendInt32(dst, fd.TypeModifier)
+ dst = pgio.AppendInt16(dst, fd.Format)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src RowDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Fields []FieldDescription
+ }{
+ Type: "RowDescription",
+ Fields: src.Fields,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *RowDescription) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Fields []struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+ }
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ dst.Fields = make([]FieldDescription, len(msg.Fields))
+ for n, field := range msg.Fields {
+ dst.Fields[n] = FieldDescription{
+ Name: []byte(field.Name),
+ TableOID: field.TableOID,
+ TableAttributeNumber: field.TableAttributeNumber,
+ DataTypeOID: field.DataTypeOID,
+ DataTypeSize: field.DataTypeSize,
+ TypeModifier: field.TypeModifier,
+ Format: field.Format,
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go
new file mode 100644
index 0000000..9eb1b6a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go
@@ -0,0 +1,90 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type SASLInitialResponse struct {
+ AuthMechanism string
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SASLInitialResponse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *SASLInitialResponse) Decode(src []byte) error {
+ *dst = SASLInitialResponse{}
+
+ rp := 0
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return errors.New("invalid SASLInitialResponse")
+ }
+
+ dst.AuthMechanism = string(src[rp:idx])
+ rp = idx + 1
+
+ rp += 4 // The rest of the message is data so we can just skip the size
+ dst.Data = src[rp:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *SASLInitialResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+
+ dst = append(dst, []byte(src.AuthMechanism)...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendInt32(dst, int32(len(src.Data)))
+ dst = append(dst, src.Data...)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SASLInitialResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ AuthMechanism string
+ Data string
+ }{
+ Type: "SASLInitialResponse",
+ AuthMechanism: src.AuthMechanism,
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *SASLInitialResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ AuthMechanism string
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ dst.AuthMechanism = msg.AuthMechanism
+ if msg.Data != "" {
+ decoded, err := hex.DecodeString(msg.Data)
+ if err != nil {
+ return err
+ }
+ dst.Data = decoded
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go
new file mode 100644
index 0000000..1b604c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go
@@ -0,0 +1,56 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+)
+
+type SASLResponse struct {
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SASLResponse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *SASLResponse) Decode(src []byte) error {
+ *dst = SASLResponse{Data: src}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *SASLResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SASLResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "SASLResponse",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *SASLResponse) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if msg.Data != "" {
+ decoded, err := hex.DecodeString(msg.Data)
+ if err != nil {
+ return err
+ }
+ dst.Data = decoded
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go
new file mode 100644
index 0000000..b0fc284
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const sslRequestNumber = 80877103
+
+type SSLRequest struct {
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SSLRequest) Frontend() {}
+
+func (dst *SSLRequest) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("ssl request too short")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != sslRequestNumber {
+ return errors.New("bad ssl request code")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *SSLRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 8)
+ dst = pgio.AppendInt32(dst, sslRequestNumber)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SSLRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "SSLRequest",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go b/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go
new file mode 100644
index 0000000..3af4587
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go
@@ -0,0 +1,94 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const ProtocolVersionNumber = 196608 // 3.0
+
+type StartupMessage struct {
+ ProtocolVersion uint32
+ Parameters map[string]string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*StartupMessage) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 4 byte
+// message length (StartupMessage has no 1 byte message type identifier).
+func (dst *StartupMessage) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("startup message too short")
+ }
+
+ dst.ProtocolVersion = binary.BigEndian.Uint32(src)
+ rp := 4
+
+ if dst.ProtocolVersion != ProtocolVersionNumber {
+ return fmt.Errorf("Bad startup message version number. Expected %d, got %d", ProtocolVersionNumber, dst.ProtocolVersion)
+ }
+
+ dst.Parameters = make(map[string]string)
+ for {
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMessage"}
+ }
+ key := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMessage"}
+ }
+ value := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ dst.Parameters[key] = value
+
+ if len(src[rp:]) == 1 {
+ if src[rp] != 0 {
+ return fmt.Errorf("Bad startup message last byte. Expected 0, got %d", src[rp])
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length but no message type identifier.
+func (src *StartupMessage) Encode(dst []byte) ([]byte, error) {
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint32(dst, src.ProtocolVersion)
+ for k, v := range src.Parameters {
+ dst = append(dst, k...)
+ dst = append(dst, 0)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src StartupMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "StartupMessage",
+ ProtocolVersion: src.ProtocolVersion,
+ Parameters: src.Parameters,
+ })
+}
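Unlike the other frontend messages, StartupMessage is framed with only a 4-byte length (no type byte), followed by the protocol version and NUL-terminated key/value pairs closed by a final NUL. A minimal construction sketch (parameter values are illustrative):

msg := &pgproto3.StartupMessage{
	ProtocolVersion: pgproto3.ProtocolVersionNumber,
	Parameters: map[string]string{
		"user":     "postgres",
		"database": "postgres",
	},
}
wire, err := msg.Encode(nil)
if err != nil {
	panic(err)
}
// wire[0:4] holds the total length, including the length field itself.
_ = wire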
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go
new file mode 100644
index 0000000..ea4fc95
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Sync struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Sync) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Sync) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Sync", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Sync) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'S', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Sync) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Sync",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go b/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go
new file mode 100644
index 0000000..35a9dc8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Terminate struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Terminate) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Terminate) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Terminate", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Terminate) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'X', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Terminate) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Terminate",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go b/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go
new file mode 100644
index 0000000..6cc7d3e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go
@@ -0,0 +1,416 @@
+package pgproto3
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// tracer traces the messages sent to and from a Backend or Frontend. The format it produces roughly mimics the
+// format produced by the libpq C function PQtrace.
+type tracer struct {
+ TracerOptions
+
+ mux sync.Mutex
+ w io.Writer
+ buf *bytes.Buffer
+}
+
+// TracerOptions controls tracing behavior. It is roughly equivalent to the libpq function PQsetTraceFlags.
+type TracerOptions struct {
+ // SuppressTimestamps prevents printing of timestamps.
+ SuppressTimestamps bool
+
+ // RegressMode redacts fields that may vary between executions.
+ RegressMode bool
+}
+
+func (t *tracer) traceMessage(sender byte, encodedLen int32, msg Message) {
+ switch msg := msg.(type) {
+ case *AuthenticationCleartextPassword:
+ t.traceAuthenticationCleartextPassword(sender, encodedLen, msg)
+ case *AuthenticationGSS:
+ t.traceAuthenticationGSS(sender, encodedLen, msg)
+ case *AuthenticationGSSContinue:
+ t.traceAuthenticationGSSContinue(sender, encodedLen, msg)
+ case *AuthenticationMD5Password:
+ t.traceAuthenticationMD5Password(sender, encodedLen, msg)
+ case *AuthenticationOk:
+ t.traceAuthenticationOk(sender, encodedLen, msg)
+ case *AuthenticationSASL:
+ t.traceAuthenticationSASL(sender, encodedLen, msg)
+ case *AuthenticationSASLContinue:
+ t.traceAuthenticationSASLContinue(sender, encodedLen, msg)
+ case *AuthenticationSASLFinal:
+ t.traceAuthenticationSASLFinal(sender, encodedLen, msg)
+ case *BackendKeyData:
+ t.traceBackendKeyData(sender, encodedLen, msg)
+ case *Bind:
+ t.traceBind(sender, encodedLen, msg)
+ case *BindComplete:
+ t.traceBindComplete(sender, encodedLen, msg)
+ case *CancelRequest:
+ t.traceCancelRequest(sender, encodedLen, msg)
+ case *Close:
+ t.traceClose(sender, encodedLen, msg)
+ case *CloseComplete:
+ t.traceCloseComplete(sender, encodedLen, msg)
+ case *CommandComplete:
+ t.traceCommandComplete(sender, encodedLen, msg)
+ case *CopyBothResponse:
+ t.traceCopyBothResponse(sender, encodedLen, msg)
+ case *CopyData:
+ t.traceCopyData(sender, encodedLen, msg)
+ case *CopyDone:
+ t.traceCopyDone(sender, encodedLen, msg)
+ case *CopyFail:
+ t.traceCopyFail(sender, encodedLen, msg)
+ case *CopyInResponse:
+ t.traceCopyInResponse(sender, encodedLen, msg)
+ case *CopyOutResponse:
+ t.traceCopyOutResponse(sender, encodedLen, msg)
+ case *DataRow:
+ t.traceDataRow(sender, encodedLen, msg)
+ case *Describe:
+ t.traceDescribe(sender, encodedLen, msg)
+ case *EmptyQueryResponse:
+ t.traceEmptyQueryResponse(sender, encodedLen, msg)
+ case *ErrorResponse:
+ t.traceErrorResponse(sender, encodedLen, msg)
+ case *Execute:
+ t.traceExecute(sender, encodedLen, msg)
+ case *Flush:
+ t.traceFlush(sender, encodedLen, msg)
+ case *FunctionCall:
+ t.traceFunctionCall(sender, encodedLen, msg)
+ case *FunctionCallResponse:
+ t.traceFunctionCallResponse(sender, encodedLen, msg)
+ case *GSSEncRequest:
+ t.traceGSSEncRequest(sender, encodedLen, msg)
+ case *NoData:
+ t.traceNoData(sender, encodedLen, msg)
+ case *NoticeResponse:
+ t.traceNoticeResponse(sender, encodedLen, msg)
+ case *NotificationResponse:
+ t.traceNotificationResponse(sender, encodedLen, msg)
+ case *ParameterDescription:
+ t.traceParameterDescription(sender, encodedLen, msg)
+ case *ParameterStatus:
+ t.traceParameterStatus(sender, encodedLen, msg)
+ case *Parse:
+ t.traceParse(sender, encodedLen, msg)
+ case *ParseComplete:
+ t.traceParseComplete(sender, encodedLen, msg)
+ case *PortalSuspended:
+ t.tracePortalSuspended(sender, encodedLen, msg)
+ case *Query:
+ t.traceQuery(sender, encodedLen, msg)
+ case *ReadyForQuery:
+ t.traceReadyForQuery(sender, encodedLen, msg)
+ case *RowDescription:
+ t.traceRowDescription(sender, encodedLen, msg)
+ case *SSLRequest:
+ t.traceSSLRequest(sender, encodedLen, msg)
+ case *StartupMessage:
+ t.traceStartupMessage(sender, encodedLen, msg)
+ case *Sync:
+ t.traceSync(sender, encodedLen, msg)
+ case *Terminate:
+ t.traceTerminate(sender, encodedLen, msg)
+ default:
+ t.writeTrace(sender, encodedLen, "Unknown", nil)
+ }
+}
+
+func (t *tracer) traceAuthenticationCleartextPassword(sender byte, encodedLen int32, msg *AuthenticationCleartextPassword) {
+ t.writeTrace(sender, encodedLen, "AuthenticationCleartextPassword", nil)
+}
+
+func (t *tracer) traceAuthenticationGSS(sender byte, encodedLen int32, msg *AuthenticationGSS) {
+ t.writeTrace(sender, encodedLen, "AuthenticationGSS", nil)
+}
+
+func (t *tracer) traceAuthenticationGSSContinue(sender byte, encodedLen int32, msg *AuthenticationGSSContinue) {
+ t.writeTrace(sender, encodedLen, "AuthenticationGSSContinue", nil)
+}
+
+func (t *tracer) traceAuthenticationMD5Password(sender byte, encodedLen int32, msg *AuthenticationMD5Password) {
+ t.writeTrace(sender, encodedLen, "AuthenticationMD5Password", nil)
+}
+
+func (t *tracer) traceAuthenticationOk(sender byte, encodedLen int32, msg *AuthenticationOk) {
+ t.writeTrace(sender, encodedLen, "AuthenticationOk", nil)
+}
+
+func (t *tracer) traceAuthenticationSASL(sender byte, encodedLen int32, msg *AuthenticationSASL) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASL", nil)
+}
+
+func (t *tracer) traceAuthenticationSASLContinue(sender byte, encodedLen int32, msg *AuthenticationSASLContinue) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASLContinue", nil)
+}
+
+func (t *tracer) traceAuthenticationSASLFinal(sender byte, encodedLen int32, msg *AuthenticationSASLFinal) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASLFinal", nil)
+}
+
+func (t *tracer) traceBackendKeyData(sender byte, encodedLen int32, msg *BackendKeyData) {
+ t.writeTrace(sender, encodedLen, "BackendKeyData", func() {
+ if t.RegressMode {
+ t.buf.WriteString("\t NNNN NNNN")
+ } else {
+ fmt.Fprintf(t.buf, "\t %d %d", msg.ProcessID, msg.SecretKey)
+ }
+ })
+}
+
+func (t *tracer) traceBind(sender byte, encodedLen int32, msg *Bind) {
+ t.writeTrace(sender, encodedLen, "Bind", func() {
+ fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.DestinationPortal)), traceDoubleQuotedString([]byte(msg.PreparedStatement)), len(msg.ParameterFormatCodes))
+ for _, fc := range msg.ParameterFormatCodes {
+ fmt.Fprintf(t.buf, " %d", fc)
+ }
+ fmt.Fprintf(t.buf, " %d", len(msg.Parameters))
+ for _, p := range msg.Parameters {
+ fmt.Fprintf(t.buf, " %s", traceSingleQuotedString(p))
+ }
+ fmt.Fprintf(t.buf, " %d", len(msg.ResultFormatCodes))
+ for _, fc := range msg.ResultFormatCodes {
+ fmt.Fprintf(t.buf, " %d", fc)
+ }
+ })
+}
+
+func (t *tracer) traceBindComplete(sender byte, encodedLen int32, msg *BindComplete) {
+ t.writeTrace(sender, encodedLen, "BindComplete", nil)
+}
+
+func (t *tracer) traceCancelRequest(sender byte, encodedLen int32, msg *CancelRequest) {
+ t.writeTrace(sender, encodedLen, "CancelRequest", nil)
+}
+
+func (t *tracer) traceClose(sender byte, encodedLen int32, msg *Close) {
+ t.writeTrace(sender, encodedLen, "Close", nil)
+}
+
+func (t *tracer) traceCloseComplete(sender byte, encodedLen int32, msg *CloseComplete) {
+ t.writeTrace(sender, encodedLen, "CloseComplete", nil)
+}
+
+func (t *tracer) traceCommandComplete(sender byte, encodedLen int32, msg *CommandComplete) {
+ t.writeTrace(sender, encodedLen, "CommandComplete", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString(msg.CommandTag))
+ })
+}
+
+func (t *tracer) traceCopyBothResponse(sender byte, encodedLen int32, msg *CopyBothResponse) {
+ t.writeTrace(sender, encodedLen, "CopyBothResponse", nil)
+}
+
+func (t *tracer) traceCopyData(sender byte, encodedLen int32, msg *CopyData) {
+ t.writeTrace(sender, encodedLen, "CopyData", nil)
+}
+
+func (t *tracer) traceCopyDone(sender byte, encodedLen int32, msg *CopyDone) {
+ t.writeTrace(sender, encodedLen, "CopyDone", nil)
+}
+
+func (t *tracer) traceCopyFail(sender byte, encodedLen int32, msg *CopyFail) {
+ t.writeTrace(sender, encodedLen, "CopyFail", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString([]byte(msg.Message)))
+ })
+}
+
+func (t *tracer) traceCopyInResponse(sender byte, encodedLen int32, msg *CopyInResponse) {
+ t.writeTrace(sender, encodedLen, "CopyInResponse", nil)
+}
+
+func (t *tracer) traceCopyOutResponse(sender byte, encodedLen int32, msg *CopyOutResponse) {
+ t.writeTrace(sender, encodedLen, "CopyOutResponse", nil)
+}
+
+func (t *tracer) traceDataRow(sender byte, encodedLen int32, msg *DataRow) {
+ t.writeTrace(sender, encodedLen, "DataRow", func() {
+ fmt.Fprintf(t.buf, "\t %d", len(msg.Values))
+ for _, v := range msg.Values {
+ if v == nil {
+ t.buf.WriteString(" -1")
+ } else {
+ fmt.Fprintf(t.buf, " %d %s", len(v), traceSingleQuotedString(v))
+ }
+ }
+ })
+}
+
+func (t *tracer) traceDescribe(sender byte, encodedLen int32, msg *Describe) {
+ t.writeTrace(sender, encodedLen, "Describe", func() {
+ fmt.Fprintf(t.buf, "\t %c %s", msg.ObjectType, traceDoubleQuotedString([]byte(msg.Name)))
+ })
+}
+
+func (t *tracer) traceEmptyQueryResponse(sender byte, encodedLen int32, msg *EmptyQueryResponse) {
+ t.writeTrace(sender, encodedLen, "EmptyQueryResponse", nil)
+}
+
+func (t *tracer) traceErrorResponse(sender byte, encodedLen int32, msg *ErrorResponse) {
+ t.writeTrace(sender, encodedLen, "ErrorResponse", nil)
+}
+
+func (t *tracer) traceExecute(sender byte, encodedLen int32, msg *Execute) {
+ t.writeTrace(sender, encodedLen, "Execute", func() {
+ fmt.Fprintf(t.buf, "\t %s %d", traceDoubleQuotedString([]byte(msg.Portal)), msg.MaxRows)
+ })
+}
+
+func (t *tracer) traceFlush(sender byte, encodedLen int32, msg *Flush) {
+ t.writeTrace(sender, encodedLen, "Flush", nil)
+}
+
+func (t *tracer) traceFunctionCall(sender byte, encodedLen int32, msg *FunctionCall) {
+ t.writeTrace(sender, encodedLen, "FunctionCall", nil)
+}
+
+func (t *tracer) traceFunctionCallResponse(sender byte, encodedLen int32, msg *FunctionCallResponse) {
+ t.writeTrace(sender, encodedLen, "FunctionCallResponse", nil)
+}
+
+func (t *tracer) traceGSSEncRequest(sender byte, encodedLen int32, msg *GSSEncRequest) {
+ t.writeTrace(sender, encodedLen, "GSSEncRequest", nil)
+}
+
+func (t *tracer) traceNoData(sender byte, encodedLen int32, msg *NoData) {
+ t.writeTrace(sender, encodedLen, "NoData", nil)
+}
+
+func (t *tracer) traceNoticeResponse(sender byte, encodedLen int32, msg *NoticeResponse) {
+ t.writeTrace(sender, encodedLen, "NoticeResponse", nil)
+}
+
+func (t *tracer) traceNotificationResponse(sender byte, encodedLen int32, msg *NotificationResponse) {
+ t.writeTrace(sender, encodedLen, "NotificationResponse", func() {
+ fmt.Fprintf(t.buf, "\t %d %s %s", msg.PID, traceDoubleQuotedString([]byte(msg.Channel)), traceDoubleQuotedString([]byte(msg.Payload)))
+ })
+}
+
+func (t *tracer) traceParameterDescription(sender byte, encodedLen int32, msg *ParameterDescription) {
+ t.writeTrace(sender, encodedLen, "ParameterDescription", nil)
+}
+
+func (t *tracer) traceParameterStatus(sender byte, encodedLen int32, msg *ParameterStatus) {
+ t.writeTrace(sender, encodedLen, "ParameterStatus", func() {
+ fmt.Fprintf(t.buf, "\t %s %s", traceDoubleQuotedString([]byte(msg.Name)), traceDoubleQuotedString([]byte(msg.Value)))
+ })
+}
+
+func (t *tracer) traceParse(sender byte, encodedLen int32, msg *Parse) {
+ t.writeTrace(sender, encodedLen, "Parse", func() {
+ fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.Name)), traceDoubleQuotedString([]byte(msg.Query)), len(msg.ParameterOIDs))
+ for _, oid := range msg.ParameterOIDs {
+ fmt.Fprintf(t.buf, " %d", oid)
+ }
+ })
+}
+
+func (t *tracer) traceParseComplete(sender byte, encodedLen int32, msg *ParseComplete) {
+ t.writeTrace(sender, encodedLen, "ParseComplete", nil)
+}
+
+func (t *tracer) tracePortalSuspended(sender byte, encodedLen int32, msg *PortalSuspended) {
+ t.writeTrace(sender, encodedLen, "PortalSuspended", nil)
+}
+
+func (t *tracer) traceQuery(sender byte, encodedLen int32, msg *Query) {
+ t.writeTrace(sender, encodedLen, "Query", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString([]byte(msg.String)))
+ })
+}
+
+func (t *tracer) traceReadyForQuery(sender byte, encodedLen int32, msg *ReadyForQuery) {
+ t.writeTrace(sender, encodedLen, "ReadyForQuery", func() {
+ fmt.Fprintf(t.buf, "\t %c", msg.TxStatus)
+ })
+}
+
+func (t *tracer) traceRowDescription(sender byte, encodedLen int32, msg *RowDescription) {
+ t.writeTrace(sender, encodedLen, "RowDescription", func() {
+ fmt.Fprintf(t.buf, "\t %d", len(msg.Fields))
+ for _, fd := range msg.Fields {
+ fmt.Fprintf(t.buf, ` %s %d %d %d %d %d %d`, traceDoubleQuotedString(fd.Name), fd.TableOID, fd.TableAttributeNumber, fd.DataTypeOID, fd.DataTypeSize, fd.TypeModifier, fd.Format)
+ }
+ })
+}
+
+func (t *tracer) traceSSLRequest(sender byte, encodedLen int32, msg *SSLRequest) {
+ t.writeTrace(sender, encodedLen, "SSLRequest", nil)
+}
+
+func (t *tracer) traceStartupMessage(sender byte, encodedLen int32, msg *StartupMessage) {
+ t.writeTrace(sender, encodedLen, "StartupMessage", nil)
+}
+
+func (t *tracer) traceSync(sender byte, encodedLen int32, msg *Sync) {
+ t.writeTrace(sender, encodedLen, "Sync", nil)
+}
+
+func (t *tracer) traceTerminate(sender byte, encodedLen int32, msg *Terminate) {
+ t.writeTrace(sender, encodedLen, "Terminate", nil)
+}
+
+func (t *tracer) writeTrace(sender byte, encodedLen int32, msgType string, writeDetails func()) {
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ defer func() {
+ if t.buf.Cap() > 1024 {
+ t.buf = &bytes.Buffer{}
+ } else {
+ t.buf.Reset()
+ }
+ }()
+
+ if !t.SuppressTimestamps {
+ now := time.Now()
+ t.buf.WriteString(now.Format("2006-01-02 15:04:05.000000"))
+ t.buf.WriteByte('\t')
+ }
+
+ t.buf.WriteByte(sender)
+ t.buf.WriteByte('\t')
+ t.buf.WriteString(msgType)
+ t.buf.WriteByte('\t')
+ t.buf.WriteString(strconv.FormatInt(int64(encodedLen), 10))
+
+ if writeDetails != nil {
+ writeDetails()
+ }
+
+ t.buf.WriteByte('\n')
+ t.buf.WriteTo(t.w)
+}
+
+// traceDoubleQuotedString returns buf as a double-quoted string without any escaping. It is roughly equivalent to
+// pqTraceOutputString in libpq.
+func traceDoubleQuotedString(buf []byte) string {
+ return `"` + string(buf) + `"`
+}
+
+// traceSingleQuotedString returns buf as a single-quoted string with non-printable characters hex-escaped. It is
+// roughly equivalent to pqTraceOutputNchar in libpq.
+func traceSingleQuotedString(buf []byte) string {
+ sb := &strings.Builder{}
+
+ sb.WriteByte('\'')
+ for _, b := range buf {
+ if b < 32 || b > 126 {
+ fmt.Fprintf(sb, `\x%x`, b)
+ } else {
+ sb.WriteByte(b)
+ }
+ }
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
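The tracer itself is unexported; it is driven through the package's Frontend/Backend Trace methods, which live elsewhere in pgproto3 and are not part of this diff. A hedged usage sketch, assuming a net.Conn named conn to the server:

frontend := pgproto3.NewFrontend(conn, conn)
frontend.Trace(os.Stdout, pgproto3.TracerOptions{
	SuppressTimestamps: false, // keep timestamps
	RegressMode:        false, // do not redact variable fields
})
// Messages subsequently sent and received through frontend are written to stdout
// in the PQtrace-like format produced by writeTrace above.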
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/array.go b/vendor/github.com/jackc/pgx/v5/pgtype/array.go
new file mode 100644
index 0000000..06b824a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/array.go
@@ -0,0 +1,460 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// Information on the internals of PostgreSQL arrays can be found in
+// src/include/utils/array.h and src/backend/utils/adt/arrayfuncs.c. Of
+// particular interest is the array_send function.
+
+type arrayHeader struct {
+ ContainsNull bool
+ ElementOID uint32
+ Dimensions []ArrayDimension
+}
+
+type ArrayDimension struct {
+ Length int32
+ LowerBound int32
+}
+
+// cardinality returns the number of elements in an array with the given dimensions.
+func cardinality(dimensions []ArrayDimension) int {
+ if len(dimensions) == 0 {
+ return 0
+ }
+
+ elementCount := int(dimensions[0].Length)
+ for _, d := range dimensions[1:] {
+ elementCount *= int(d.Length)
+ }
+
+ return elementCount
+}
+
+func (dst *arrayHeader) DecodeBinary(m *Map, src []byte) (int, error) {
+ if len(src) < 12 {
+ return 0, fmt.Errorf("array header too short: %d", len(src))
+ }
+
+ rp := 0
+
+ numDims := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.ContainsNull = binary.BigEndian.Uint32(src[rp:]) == 1
+ rp += 4
+
+ dst.ElementOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+
+ dst.Dimensions = make([]ArrayDimension, numDims)
+ if len(src) < 12+numDims*8 {
+ return 0, fmt.Errorf("array header too short for %d dimensions: %d", numDims, len(src))
+ }
+ for i := range dst.Dimensions {
+ dst.Dimensions[i].Length = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.Dimensions[i].LowerBound = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ }
+
+ return rp, nil
+}
+
+func (src arrayHeader) EncodeBinary(buf []byte) []byte {
+ buf = pgio.AppendInt32(buf, int32(len(src.Dimensions)))
+
+ var containsNull int32
+ if src.ContainsNull {
+ containsNull = 1
+ }
+ buf = pgio.AppendInt32(buf, containsNull)
+
+ buf = pgio.AppendUint32(buf, src.ElementOID)
+
+ for i := range src.Dimensions {
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].Length)
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].LowerBound)
+ }
+
+ return buf
+}
+
+type untypedTextArray struct {
+ Elements []string
+ Quoted []bool
+ Dimensions []ArrayDimension
+}
+
+func parseUntypedTextArray(src string) (*untypedTextArray, error) {
+ dst := &untypedTextArray{
+ Elements: []string{},
+ Quoted: []bool{},
+ Dimensions: []ArrayDimension{},
+ }
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ var explicitDimensions []ArrayDimension
+
+ // Array has explicit dimensions
+ if r == '[' {
+ buf.UnreadRune()
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r == '=' {
+ break
+ } else if r != '[' {
+ return nil, fmt.Errorf("invalid array, expected '[' or '=' got %v", r)
+ }
+
+ lower, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != ':' {
+ return nil, fmt.Errorf("invalid array, expected ':' got %v", r)
+ }
+
+ upper, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != ']' {
+ return nil, fmt.Errorf("invalid array, expected ']' got %v", r)
+ }
+
+ explicitDimensions = append(explicitDimensions, ArrayDimension{LowerBound: lower, Length: upper - lower + 1})
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+ }
+
+ if r != '{' {
+ return nil, fmt.Errorf("invalid array, expected '{' got %v", r)
+ }
+
+ implicitDimensions := []ArrayDimension{{LowerBound: 1, Length: 0}}
+
+ // Consume all initial opening brackets. This provides the number of dimensions.
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r == '{' {
+ implicitDimensions[len(implicitDimensions)-1].Length = 1
+ implicitDimensions = append(implicitDimensions, ArrayDimension{LowerBound: 1})
+ } else {
+ buf.UnreadRune()
+ break
+ }
+ }
+ currentDim := len(implicitDimensions) - 1
+ counterDim := currentDim
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ switch r {
+ case '{':
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ currentDim++
+ case ',':
+ case '}':
+ currentDim--
+ if currentDim < counterDim {
+ counterDim = currentDim
+ }
+ default:
+ buf.UnreadRune()
+ value, quoted, err := arrayParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array value: %w", err)
+ }
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ dst.Quoted = append(dst.Quoted, quoted)
+ dst.Elements = append(dst.Elements, value)
+ }
+
+ if currentDim < 0 {
+ break
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ if len(dst.Elements) == 0 {
+ } else if len(explicitDimensions) > 0 {
+ dst.Dimensions = explicitDimensions
+ } else {
+ dst.Dimensions = implicitDimensions
+ }
+
+ return dst, nil
+}
+
+func skipWhitespace(buf *bytes.Buffer) {
+ var r rune
+ var err error
+ for r, _, _ = buf.ReadRune(); unicode.IsSpace(r); r, _, _ = buf.ReadRune() {
+ }
+
+ if err != io.EOF {
+ buf.UnreadRune()
+ }
+}
+
+func arrayParseValue(buf *bytes.Buffer) (string, bool, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ if r == '"' {
+ return arrayParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+
+ switch r {
+ case ',', '}':
+ buf.UnreadRune()
+ return s.String(), false, nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseQuotedValue(buf *bytes.Buffer) (string, bool, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ buf.UnreadRune()
+ return s.String(), true, nil
+ }
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseInteger(buf *bytes.Buffer) (int32, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return 0, err
+ }
+
+ if ('0' <= r && r <= '9') || r == '-' {
+ s.WriteRune(r)
+ } else {
+ buf.UnreadRune()
+ n, err := strconv.ParseInt(s.String(), 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(n), nil
+ }
+ }
+}
+
+func encodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
+ var customDimensions bool
+ for _, dim := range dimensions {
+ if dim.LowerBound != 1 {
+ customDimensions = true
+ }
+ }
+
+ if !customDimensions {
+ return buf
+ }
+
+ for _, dim := range dimensions {
+ buf = append(buf, '[')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound), 10)...)
+ buf = append(buf, ':')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound+dim.Length-1), 10)...)
+ buf = append(buf, ']')
+ }
+
+ return append(buf, '=')
+}
+
+var quoteArrayReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
+
+func quoteArrayElement(src string) string {
+ return `"` + quoteArrayReplacer.Replace(src) + `"`
+}
+
+func isSpace(ch byte) bool {
+ // see array_isspace:
+ // https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\v' || ch == '\f'
+}
+
+func quoteArrayElementIfNeeded(src string) string {
+ if src == "" || (len(src) == 4 && strings.EqualFold(src, "null")) || isSpace(src[0]) || isSpace(src[len(src)-1]) || strings.ContainsAny(src, `{},"\`) {
+ return quoteArrayElement(src)
+ }
+ return src
+}
+
+// Array represents a PostgreSQL array for T. It implements the ArrayGetter and ArraySetter interfaces. It preserves
+// PostgreSQL dimensions and custom lower bounds. Use FlatArray if these are not needed.
+type Array[T any] struct {
+ Elements []T
+ Dims []ArrayDimension
+ Valid bool
+}
+
+func (a Array[T]) Dimensions() []ArrayDimension {
+ return a.Dims
+}
+
+func (a Array[T]) Index(i int) any {
+ return a.Elements[i]
+}
+
+func (a Array[T]) IndexType() any {
+ var el T
+ return el
+}
+
+func (a *Array[T]) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ *a = Array[T]{}
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ *a = Array[T]{
+ Elements: make([]T, elementCount),
+ Dims: dimensions,
+ Valid: true,
+ }
+
+ return nil
+}
+
+func (a Array[T]) ScanIndex(i int) any {
+ return &a.Elements[i]
+}
+
+func (a Array[T]) ScanIndexType() any {
+ return new(T)
+}
+
+// FlatArray implements the ArrayGetter and ArraySetter interfaces for any slice of T. It ignores PostgreSQL dimensions
+// and custom lower bounds. Use Array to preserve these.
+type FlatArray[T any] []T
+
+func (a FlatArray[T]) Dimensions() []ArrayDimension {
+ if a == nil {
+ return nil
+ }
+
+ return []ArrayDimension{{Length: int32(len(a)), LowerBound: 1}}
+}
+
+func (a FlatArray[T]) Index(i int) any {
+ return a[i]
+}
+
+func (a FlatArray[T]) IndexType() any {
+ var el T
+ return el
+}
+
+func (a *FlatArray[T]) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ *a = nil
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ *a = make(FlatArray[T], elementCount)
+ return nil
+}
+
+func (a FlatArray[T]) ScanIndex(i int) any {
+ return &a[i]
+}
+
+func (a FlatArray[T]) ScanIndexType() any {
+ return new(T)
+}
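A sketch of the Array/FlatArray distinction described above, assuming an open *pgx.Conn named conn and a context.Context named ctx: FlatArray discards dimension metadata, while Array keeps it.

var flat pgtype.FlatArray[int32]
if err := conn.QueryRow(ctx, "select '{1,2,3}'::int4[]").Scan(&flat); err != nil {
	panic(err)
}
// flat == FlatArray[int32]{1, 2, 3}

var full pgtype.Array[int32]
if err := conn.QueryRow(ctx, "select '[0:2]={4,5,6}'::int4[]").Scan(&full); err != nil {
	panic(err)
}
// full.Elements == []int32{4, 5, 6}; full.Dims[0].LowerBound == 0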
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go
new file mode 100644
index 0000000..bf5f698
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go
@@ -0,0 +1,405 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// ArrayGetter is a type that can be converted into a PostgreSQL array.
+type ArrayGetter interface {
+ // Dimensions returns the array dimensions. If array is nil then nil is returned.
+ Dimensions() []ArrayDimension
+
+ // Index returns the element at i.
+ Index(i int) any
+
+ // IndexType returns a non-nil scan target of the type Index will return. This is used by ArrayCodec.PlanEncode.
+ IndexType() any
+}
+
+// ArraySetter is a type that can be set from a PostgreSQL array.
+type ArraySetter interface {
+ // SetDimensions prepares the value such that ScanIndex can be called for each element. This will remove any existing
+ // elements. dimensions may be nil to indicate a NULL array. If unable to exactly preserve dimensions SetDimensions
+ // may return an error or silently flatten the array dimensions.
+ SetDimensions(dimensions []ArrayDimension) error
+
+ // ScanIndex returns a value usable as a scan target for i. SetDimensions must be called before ScanIndex.
+ ScanIndex(i int) any
+
+ // ScanIndexType returns a non-nil scan target of the type ScanIndex will return. This is used by
+ // ArrayCodec.PlanScan.
+ ScanIndexType() any
+}
+
+// ArrayCodec is a codec for any array type.
+type ArrayCodec struct {
+ ElementType *Type
+}
+
+func (c *ArrayCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *ArrayCodec) PreferredFormat() int16 {
+ // The binary format should always be preferred for arrays if it is supported. Usually, this will happen automatically
+ // because most types that support binary prefer it. However, text, json, and jsonb support binary but prefer the text
+ // format. This is because it is simpler for jsonb and PostgreSQL can be significantly faster using the text format
+ // for text-like data types than binary. However, arrays appear to always be faster in binary.
+ //
+ // https://www.postgresql.org/message-id/CAMovtNoHFod2jMAKQjjxv209PCTJx5Kc66anwWvX0mEiaXwgmA%40mail.gmail.com
+ if c.ElementType.Codec.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *ArrayCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ arrayValuer, ok := value.(ArrayGetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := arrayValuer.IndexType()
+
+ elementEncodePlan := m.PlanEncode(c.ElementType.OID, format, elementType)
+ if elementEncodePlan == nil {
+ if reflect.TypeOf(elementType) != nil {
+ return nil
+ }
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanArrayCodecBinary{ac: c, m: m, oid: oid}
+ case TextFormatCode:
+ return &encodePlanArrayCodecText{ac: c, m: m, oid: oid}
+ }
+
+ return nil
+}
+
+type encodePlanArrayCodecText struct {
+ ac *ArrayCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanArrayCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ array := value.(ArrayGetter)
+
+ dimensions := array.Dimensions()
+ if dimensions == nil {
+ return nil, nil
+ }
+
+ elementCount := cardinality(dimensions)
+ if elementCount == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = encodeTextArrayDimensions(buf, dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(dimensions))
+ dimElemCounts[len(dimensions)-1] = int(dimensions[len(dimensions)-1].Length)
+ for i := len(dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ inElemBuf := make([]byte, 0, 32)
+ for i := 0; i < elementCount; i++ {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elem := array.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, TextFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", array.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, quoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+type encodePlanArrayCodecBinary struct {
+ ac *ArrayCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanArrayCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ array := value.(ArrayGetter)
+
+ dimensions := array.Dimensions()
+ if dimensions == nil {
+ return nil, nil
+ }
+
+ arrayHeader := arrayHeader{
+ Dimensions: dimensions,
+ ElementOID: p.ac.ElementType.OID,
+ }
+
+ containsNullIndex := len(buf) + 4
+
+ buf = arrayHeader.EncodeBinary(buf)
+
+ elementCount := cardinality(dimensions)
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ for i := 0; i < elementCount; i++ {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elem := array.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, BinaryFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", array.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ pgio.SetInt32(buf[containsNullIndex:], 1)
+ } else {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+func (c *ArrayCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ arrayScanner, ok := target.(ArraySetter)
+ if !ok {
+ return nil
+ }
+
+ // target / arrayScanner might be a pointer to a nil. If it is, create one so we can call ScanIndexType to plan the
+ // scan of the elements.
+ if isNil, _ := isNilDriverValuer(target); isNil {
+ arrayScanner = reflect.New(reflect.TypeOf(target).Elem()).Interface().(ArraySetter)
+ }
+
+ elementType := arrayScanner.ScanIndexType()
+
+ elementScanPlan := m.PlanScan(c.ElementType.OID, format, elementType)
+ if _, ok := elementScanPlan.(*scanPlanFail); ok {
+ return nil
+ }
+
+ return &scanPlanArrayCodec{
+ arrayCodec: c,
+ m: m,
+ oid: oid,
+ formatCode: format,
+ }
+}
+
+func (c *ArrayCodec) decodeBinary(m *Map, arrayOID uint32, src []byte, array ArraySetter) error {
+ var arrayHeader arrayHeader
+ rp, err := arrayHeader.DecodeBinary(m, src)
+ if err != nil {
+ return err
+ }
+
+ err = array.SetDimensions(arrayHeader.Dimensions)
+ if err != nil {
+ return err
+ }
+
+ elementCount := cardinality(arrayHeader.Dimensions)
+ if elementCount == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, BinaryFormatCode, array.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, BinaryFormatCode, array.ScanIndex(0))
+ }
+
+ for i := 0; i < elementCount; i++ {
+ elem := array.ScanIndex(i)
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return fmt.Errorf("failed to scan array element %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (c *ArrayCodec) decodeText(m *Map, arrayOID uint32, src []byte, array ArraySetter) error {
+ uta, err := parseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ err = array.SetDimensions(uta.Dimensions)
+ if err != nil {
+ return err
+ }
+
+ if len(uta.Elements) == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, TextFormatCode, array.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, array.ScanIndex(0))
+ }
+
+ for i, s := range uta.Elements {
+ elem := array.ScanIndex(i)
+ var elemSrc []byte
+ if s != "NULL" || uta.Quoted[i] {
+ elemSrc = []byte(s)
+ }
+
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type scanPlanArrayCodec struct {
+ arrayCodec *ArrayCodec
+ m *Map
+ oid uint32
+ formatCode int16
+ elementScanPlan ScanPlan
+}
+
+func (spac *scanPlanArrayCodec) Scan(src []byte, dst any) error {
+ c := spac.arrayCodec
+ m := spac.m
+ oid := spac.oid
+ formatCode := spac.formatCode
+
+ array := dst.(ArraySetter)
+
+ if src == nil {
+ return array.SetDimensions(nil)
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ return c.decodeBinary(m, oid, src, array)
+ case TextFormatCode:
+ return c.decodeText(m, oid, src, array)
+ default:
+ return fmt.Errorf("unknown format code %d", formatCode)
+ }
+}
+
+func (c *ArrayCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *ArrayCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var slice []any
+ err := m.PlanScan(oid, format, &slice).Scan(src, &slice)
+ return slice, err
+}
+
+func isRagged(slice reflect.Value) bool {
+ if slice.Type().Elem().Kind() != reflect.Slice {
+ return false
+ }
+
+ sliceLen := slice.Len()
+ innerLen := 0
+ for i := 0; i < sliceLen; i++ {
+ if i == 0 {
+ innerLen = slice.Index(i).Len()
+ } else {
+ if slice.Index(i).Len() != innerLen {
+ return true
+ }
+ }
+ if isRagged(slice.Index(i)) {
+ return true
+ }
+ }
+
+ return false
+}
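ArrayCodec is generic over its element type, so supporting arrays of a custom type only requires registering both the element Type and a Type wrapping ArrayCodec with the pgtype Map (defined elsewhere in the package, outside this diff). A sketch with made-up OIDs (99998/99999 are placeholders, not real catalog values):

m := pgtype.NewMap()

element := &pgtype.Type{Name: "mytype", OID: 99999, Codec: pgtype.TextCodec{}}
m.RegisterType(element)

m.RegisterType(&pgtype.Type{
	Name:  "_mytype", // PostgreSQL names array types with a leading underscore
	OID:   99998,
	Codec: &pgtype.ArrayCodec{ElementType: element},
})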
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bits.go b/vendor/github.com/jackc/pgx/v5/pgtype/bits.go
new file mode 100644
index 0000000..e7a1d01
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bits.go
@@ -0,0 +1,210 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BitsScanner interface {
+ ScanBits(v Bits) error
+}
+
+type BitsValuer interface {
+ BitsValue() (Bits, error)
+}
+
+// Bits represents the PostgreSQL bit and varbit types.
+type Bits struct {
+ Bytes []byte
+ Len int32 // Number of bits
+ Valid bool
+}
+
+func (b *Bits) ScanBits(v Bits) error {
+ *b = v
+ return nil
+}
+
+func (b Bits) BitsValue() (Bits, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bits) Scan(src any) error {
+ if src == nil {
+ *dst = Bits{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToBitsScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Bits) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := BitsCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type BitsCodec struct{}
+
+func (BitsCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BitsCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BitsCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(BitsValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanBitsCodecBinary{}
+ case TextFormatCode:
+ return encodePlanBitsCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanBitsCodecBinary struct{}
+
+func (encodePlanBitsCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ bits, err := value.(BitsValuer).BitsValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !bits.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, bits.Len)
+ return append(buf, bits.Bytes...), nil
+}
+
+type encodePlanBitsCodecText struct{}
+
+func (encodePlanBitsCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ bits, err := value.(BitsValuer).BitsValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !bits.Valid {
+ return nil, nil
+ }
+
+ for i := int32(0); i < bits.Len; i++ {
+ byteIdx := i / 8
+ bitMask := byte(128 >> byte(i%8))
+ char := byte('0')
+ if bits.Bytes[byteIdx]&bitMask > 0 {
+ char = '1'
+ }
+ buf = append(buf, char)
+ }
+
+ return buf, nil
+}
+
+func (BitsCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case BitsScanner:
+ return scanPlanBinaryBitsToBitsScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case BitsScanner:
+ return scanPlanTextAnyToBitsScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c BitsCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c BitsCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var box Bits
+ err := codecScan(c, m, oid, format, src, &box)
+ if err != nil {
+ return nil, err
+ }
+ return box, nil
+}
+
+type scanPlanBinaryBitsToBitsScanner struct{}
+
+func (scanPlanBinaryBitsToBitsScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BitsScanner)
+
+ if src == nil {
+ return scanner.ScanBits(Bits{})
+ }
+
+ if len(src) < 4 {
+ return fmt.Errorf("invalid length for bit/varbit: %v", len(src))
+ }
+
+ bitLen := int32(binary.BigEndian.Uint32(src))
+ rp := 4
+ buf := make([]byte, len(src[rp:]))
+ copy(buf, src[rp:])
+
+ return scanner.ScanBits(Bits{Bytes: buf, Len: bitLen, Valid: true})
+}
+
+type scanPlanTextAnyToBitsScanner struct{}
+
+func (scanPlanTextAnyToBitsScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BitsScanner)
+
+ if src == nil {
+ return scanner.ScanBits(Bits{})
+ }
+
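+ // Each byte of src is the character '0' or '1'; pack the bits into bytes most-significant-bit first.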
+ bitLen := len(src)
+ byteLen := bitLen / 8
+ if bitLen%8 > 0 {
+ byteLen++
+ }
+ buf := make([]byte, byteLen)
+
+ for i, b := range src {
+ if b == '1' {
+ byteIdx := i / 8
+ bitIdx := uint(i % 8)
+ buf[byteIdx] = buf[byteIdx] | (128 >> bitIdx)
+ }
+ }
+
+ return scanner.ScanBits(Bits{Bytes: buf, Len: int32(bitLen), Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bool.go b/vendor/github.com/jackc/pgx/v5/pgtype/bool.go
new file mode 100644
index 0000000..71caffa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bool.go
@@ -0,0 +1,343 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type BoolScanner interface {
+ ScanBool(v Bool) error
+}
+
+type BoolValuer interface {
+ BoolValue() (Bool, error)
+}
+
+type Bool struct {
+ Bool bool
+ Valid bool
+}
+
+func (b *Bool) ScanBool(v Bool) error {
+ *b = v
+ return nil
+}
+
+func (b Bool) BoolValue() (Bool, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bool) Scan(src any) error {
+ if src == nil {
+ *dst = Bool{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case bool:
+ *dst = Bool{Bool: src, Valid: true}
+ return nil
+ case string:
+ b, err := strconv.ParseBool(src)
+ if err != nil {
+ return err
+ }
+ *dst = Bool{Bool: b, Valid: true}
+ return nil
+ case []byte:
+ b, err := strconv.ParseBool(string(src))
+ if err != nil {
+ return err
+ }
+ *dst = Bool{Bool: b, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Bool) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ return src.Bool, nil
+}
+
+func (src Bool) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ if src.Bool {
+ return []byte("true"), nil
+ } else {
+ return []byte("false"), nil
+ }
+}
+
+func (dst *Bool) UnmarshalJSON(b []byte) error {
+ var v *bool
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ if v == nil {
+ *dst = Bool{}
+ } else {
+ *dst = Bool{Bool: *v, Valid: true}
+ }
+
+ return nil
+}
+
+type BoolCodec struct{}
+
+func (BoolCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BoolCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BoolCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case bool:
+ return encodePlanBoolCodecBinaryBool{}
+ case BoolValuer:
+ return encodePlanBoolCodecBinaryBoolValuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case bool:
+ return encodePlanBoolCodecTextBool{}
+ case BoolValuer:
+ return encodePlanBoolCodecTextBoolValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanBoolCodecBinaryBool struct{}
+
+func (encodePlanBoolCodecBinaryBool) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(bool)
+
+ if v {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecTextBoolValuer struct{}
+
+func (encodePlanBoolCodecTextBoolValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BoolValuer).BoolValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !b.Valid {
+ return nil, nil
+ }
+
+ if b.Bool {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecBinaryBoolValuer struct{}
+
+func (encodePlanBoolCodecBinaryBoolValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BoolValuer).BoolValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !b.Valid {
+ return nil, nil
+ }
+
+ if b.Bool {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecTextBool struct{}
+
+func (encodePlanBoolCodecTextBool) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(bool)
+
+ if v {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+func (BoolCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *bool:
+ return scanPlanBinaryBoolToBool{}
+ case BoolScanner:
+ return scanPlanBinaryBoolToBoolScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *bool:
+ return scanPlanTextAnyToBool{}
+ case BoolScanner:
+ return scanPlanTextAnyToBoolScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c BoolCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c BoolCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var b bool
+ err := codecScan(c, m, oid, format, src, &b)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+type scanPlanBinaryBoolToBool struct{}
+
+func (scanPlanBinaryBoolToBool) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 1 {
+ return fmt.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ p, ok := (dst).(*bool)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = src[0] == 1
+
+ return nil
+}
+
+type scanPlanTextAnyToBool struct{}
+
+func (scanPlanTextAnyToBool) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("cannot scan empty string into %T", dst)
+ }
+
+ p, ok := (dst).(*bool)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ v, err := planTextToBool(src)
+ if err != nil {
+ return err
+ }
+
+ *p = v
+
+ return nil
+}
+
+type scanPlanBinaryBoolToBoolScanner struct{}
+
+func (scanPlanBinaryBoolToBoolScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(BoolScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanBool(Bool{})
+ }
+
+ if len(src) != 1 {
+ return fmt.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ return s.ScanBool(Bool{Bool: src[0] == 1, Valid: true})
+}
+
+type scanPlanTextAnyToBoolScanner struct{}
+
+func (scanPlanTextAnyToBoolScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(BoolScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanBool(Bool{})
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("cannot scan empty string into %T", dst)
+ }
+
+ v, err := planTextToBool(src)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanBool(Bool{Bool: v, Valid: true})
+}
+
+// https://www.postgresql.org/docs/11/datatype-boolean.html
+func planTextToBool(src []byte) (bool, error) {
+ s := string(bytes.ToLower(bytes.TrimSpace(src)))
+
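+ // Note the swapped strings.HasPrefix arguments: s is accepted when it is a prefix of "true"/"yes" (or of "false"/"no"/"off"), mirroring the abbreviated boolean literals PostgreSQL allows.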
+ switch {
+ case strings.HasPrefix("true", s), strings.HasPrefix("yes", s), s == "on", s == "1":
+ return true, nil
+ case strings.HasPrefix("false", s), strings.HasPrefix("no", s), strings.HasPrefix("off", s), s == "0":
+ return false, nil
+ default:
+ return false, fmt.Errorf("unknown boolean string representation %q", src)
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/box.go b/vendor/github.com/jackc/pgx/v5/pgtype/box.go
new file mode 100644
index 0000000..887d268
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/box.go
@@ -0,0 +1,238 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BoxScanner interface {
+ ScanBox(v Box) error
+}
+
+type BoxValuer interface {
+ BoxValue() (Box, error)
+}
+
+type Box struct {
+ P [2]Vec2
+ Valid bool
+}
+
+func (b *Box) ScanBox(v Box) error {
+ *b = v
+ return nil
+}
+
+func (b Box) BoxValue() (Box, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Box) Scan(src any) error {
+ if src == nil {
+ *dst = Box{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToBoxScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Box) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := BoxCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type BoxCodec struct{}
+
+func (BoxCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BoxCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BoxCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(BoxValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanBoxCodecBinary{}
+ case TextFormatCode:
+ return encodePlanBoxCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanBoxCodecBinary struct{}
+
+func (encodePlanBoxCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ box, err := value.(BoxValuer).BoxValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !box.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[1].Y))
+ return buf, nil
+}
+
+type encodePlanBoxCodecText struct{}
+
+func (encodePlanBoxCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ box, err := value.(BoxValuer).BoxValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !box.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%s,%s),(%s,%s)`,
+ strconv.FormatFloat(box.P[0].X, 'f', -1, 64),
+ strconv.FormatFloat(box.P[0].Y, 'f', -1, 64),
+ strconv.FormatFloat(box.P[1].X, 'f', -1, 64),
+ strconv.FormatFloat(box.P[1].Y, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (BoxCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case BoxScanner:
+ return scanPlanBinaryBoxToBoxScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case BoxScanner:
+ return scanPlanTextAnyToBoxScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryBoxToBoxScanner struct{}
+
+func (scanPlanBinaryBoxToBoxScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BoxScanner)
+
+ if src == nil {
+ return scanner.ScanBox(Box{})
+ }
+
+ if len(src) != 32 {
+ return fmt.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ return scanner.ScanBox(Box{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToBoxScanner struct{}
+
+func (scanPlanTextAnyToBoxScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BoxScanner)
+
+ if src == nil {
+ return scanner.ScanBox(Box{})
+ }
+
+ if len(src) < 11 {
+ return fmt.Errorf("invalid length for Box: %v", len(src))
+ }
+
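+ // The text representation is "(x1,y1),(x2,y2)"; drop the leading '(' and parse each coordinate in turn.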
+ str := string(src[1:])
+
+ var end int
+ end = strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-1]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanBox(Box{P: [2]Vec2{{x1, y1}, {x2, y2}}, Valid: true})
+}
+
+func (c BoxCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c BoxCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var box Box
+ err := codecScan(c, m, oid, format, src, &box)
+ if err != nil {
+ return nil, err
+ }
+ return box, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go b/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go
new file mode 100644
index 0000000..b39d3fa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go
@@ -0,0 +1,952 @@
+package pgtype
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "net"
+ "net/netip"
+ "reflect"
+ "time"
+)
+
+type int8Wrapper int8
+
+func (w int8Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int8Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int8")
+ }
+
+ if v.Int64 < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", v.Int64)
+ }
+ if v.Int64 > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", v.Int64)
+ }
+ *w = int8Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int8Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int16Wrapper int16
+
+func (w int16Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int16Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int16")
+ }
+
+ if v.Int64 < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", v.Int64)
+ }
+ if v.Int64 > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", v.Int64)
+ }
+ *w = int16Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int16Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int32Wrapper int32
+
+func (w int32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int32")
+ }
+
+ if v.Int64 < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for int32", v.Int64)
+ }
+ if v.Int64 > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for int32", v.Int64)
+ }
+ *w = int32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int32Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int64Wrapper int64
+
+func (w int64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int64")
+ }
+
+ *w = int64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int64Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type intWrapper int
+
+func (w intWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *intWrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int")
+ }
+
+ if v.Int64 < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", v.Int64)
+ }
+ if v.Int64 > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", v.Int64)
+ }
+
+ *w = intWrapper(v.Int64)
+
+ return nil
+}
+
+func (w intWrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint8Wrapper uint8
+
+func (w uint8Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint8Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint8")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", v.Int64)
+ }
+ if v.Int64 > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", v.Int64)
+ }
+ *w = uint8Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint8Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint16Wrapper uint16
+
+func (w uint16Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint16Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint16")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", v.Int64)
+ }
+ if v.Int64 > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", v.Int64)
+ }
+ *w = uint16Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint16Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint32Wrapper uint32
+
+func (w uint32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint32")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than maximum value for uint32", v.Int64)
+ }
+ *w = uint32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint32Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint64Wrapper uint64
+
+func (w uint64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint64")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", v.Int64)
+ }
+
+ *w = uint64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint64Wrapper) Int64Value() (Int8, error) {
+ if uint64(w) > uint64(math.MaxInt64) {
+ return Int8{}, fmt.Errorf("%d is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *uint64Wrapper) ScanNumeric(v Numeric) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint64")
+ }
+
+ bi, err := v.toBigInt()
+ if err != nil {
+ return fmt.Errorf("cannot scan into *uint64: %w", err)
+ }
+
+ if !bi.IsUint64() {
+ return fmt.Errorf("cannot scan %v into *uint64", bi.String())
+ }
+
+ *w = uint64Wrapper(bi.Uint64())
+
+ return nil
+}
+
+func (w uint64Wrapper) NumericValue() (Numeric, error) {
+ return Numeric{Int: new(big.Int).SetUint64(uint64(w)), Valid: true}, nil
+}
+
+type uintWrapper uint
+
+func (w uintWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uintWrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint64")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", v.Int64)
+ }
+
+ if uint64(v.Int64) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", v.Int64)
+ }
+
+ *w = uintWrapper(v.Int64)
+
+ return nil
+}
+
+func (w uintWrapper) Int64Value() (Int8, error) {
+ if uint64(w) > uint64(math.MaxInt64) {
+ return Int8{}, fmt.Errorf("%d is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *uintWrapper) ScanNumeric(v Numeric) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint")
+ }
+
+ bi, err := v.toBigInt()
+ if err != nil {
+ return fmt.Errorf("cannot scan into *uint: %w", err)
+ }
+
+ if !bi.IsUint64() {
+ return fmt.Errorf("cannot scan %v into *uint", bi.String())
+ }
+
+ ui := bi.Uint64()
+
+ if math.MaxUint < ui {
+ return fmt.Errorf("cannot scan %v into *uint", ui)
+ }
+
+ *w = uintWrapper(ui)
+
+ return nil
+}
+
+func (w uintWrapper) NumericValue() (Numeric, error) {
+ return Numeric{Int: new(big.Int).SetUint64(uint64(w)), Valid: true}, nil
+}
+
+type float32Wrapper float32
+
+func (w float32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *float32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float32")
+ }
+
+ *w = float32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w float32Wrapper) Int64Value() (Int8, error) {
+ if w > math.MaxInt64 {
+ return Int8{}, fmt.Errorf("%f is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *float32Wrapper) ScanFloat64(v Float8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float32")
+ }
+
+ *w = float32Wrapper(v.Float64)
+
+ return nil
+}
+
+func (w float32Wrapper) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(w), Valid: true}, nil
+}
+
+type float64Wrapper float64
+
+func (w float64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *float64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float64")
+ }
+
+ *w = float64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w float64Wrapper) Int64Value() (Int8, error) {
+ if w > math.MaxInt64 {
+ return Int8{}, fmt.Errorf("%f is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *float64Wrapper) ScanFloat64(v Float8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float64")
+ }
+
+ *w = float64Wrapper(v.Float64)
+
+ return nil
+}
+
+func (w float64Wrapper) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(w), Valid: true}, nil
+}
+
+type stringWrapper string
+
+func (w stringWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *stringWrapper) ScanText(v Text) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *string")
+ }
+
+ *w = stringWrapper(v.String)
+ return nil
+}
+
+func (w stringWrapper) TextValue() (Text, error) {
+ return Text{String: string(w), Valid: true}, nil
+}
+
+type timeWrapper time.Time
+
+func (w *timeWrapper) ScanDate(v Date) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) DateValue() (Date, error) {
+ return Date{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTimestamp(v Timestamp) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) TimestampValue() (Timestamp, error) {
+ return Timestamp{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTimestamptz(v Timestamptz) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) TimestamptzValue() (Timestamptz, error) {
+ return Timestamptz{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTime(v Time) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ // 24:00:00 is max allowed time in PostgreSQL, but time.Time will normalize that to 00:00:00 the next day.
+ var maxRepresentableByTime int64 = 24*60*60*1000000 - 1
+ if v.Microseconds > maxRepresentableByTime {
+ return fmt.Errorf("%d microseconds cannot be represented as time.Time", v.Microseconds)
+ }
+
+ usec := v.Microseconds
+ hours := usec / microsecondsPerHour
+ usec -= hours * microsecondsPerHour
+ minutes := usec / microsecondsPerMinute
+ usec -= minutes * microsecondsPerMinute
+ seconds := usec / microsecondsPerSecond
+ usec -= seconds * microsecondsPerSecond
+ ns := usec * 1000
+ *w = timeWrapper(time.Date(2000, 1, 1, int(hours), int(minutes), int(seconds), int(ns), time.UTC))
+ return nil
+}
+
+func (w timeWrapper) TimeValue() (Time, error) {
+ t := time.Time(w)
+ usec := int64(t.Hour())*microsecondsPerHour +
+ int64(t.Minute())*microsecondsPerMinute +
+ int64(t.Second())*microsecondsPerSecond +
+ int64(t.Nanosecond())/1000
+ return Time{Microseconds: usec, Valid: true}, nil
+}
+
+type durationWrapper time.Duration
+
+func (w durationWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *durationWrapper) ScanInterval(v Interval) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Interval")
+ }
+
+ us := int64(v.Months)*microsecondsPerMonth + int64(v.Days)*microsecondsPerDay + v.Microseconds
+ *w = durationWrapper(time.Duration(us) * time.Microsecond)
+ return nil
+}
+
+func (w durationWrapper) IntervalValue() (Interval, error) {
+ return Interval{Microseconds: int64(w) / 1000, Valid: true}, nil
+}
+
+type netIPNetWrapper net.IPNet
+
+func (w *netIPNetWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ return fmt.Errorf("cannot scan NULL into *net.IPNet")
+ }
+
+ *w = netIPNetWrapper{
+ IP: v.Addr().AsSlice(),
+ Mask: net.CIDRMask(v.Bits(), v.Addr().BitLen()),
+ }
+
+ return nil
+}
+func (w netIPNetWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ ip, ok := netip.AddrFromSlice(w.IP)
+ if !ok {
+ return netip.Prefix{}, errors.New("invalid net.IPNet")
+ }
+
+ ones, _ := w.Mask.Size()
+
+ return netip.PrefixFrom(ip, ones), nil
+}
+
+type netIPWrapper net.IP
+
+func (w netIPWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *netIPWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ *w = nil
+ return nil
+ }
+
+ if v.Addr().BitLen() != v.Bits() {
+ return fmt.Errorf("cannot scan %v to *net.IP", v)
+ }
+
+ *w = netIPWrapper(v.Addr().AsSlice())
+ return nil
+}
+
+func (w netIPWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ if w == nil {
+ return netip.Prefix{}, nil
+ }
+
+ addr, ok := netip.AddrFromSlice([]byte(w))
+ if !ok {
+ return netip.Prefix{}, errors.New("invalid net.IP")
+ }
+
+ return netip.PrefixFrom(addr, addr.BitLen()), nil
+}
+
+type netipPrefixWrapper netip.Prefix
+
+func (w *netipPrefixWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ *w = netipPrefixWrapper(v)
+ return nil
+}
+
+func (w netipPrefixWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ return netip.Prefix(w), nil
+}
+
+type netipAddrWrapper netip.Addr
+
+func (w *netipAddrWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ *w = netipAddrWrapper(netip.Addr{})
+ return nil
+ }
+
+ if v.Addr().BitLen() != v.Bits() {
+ return fmt.Errorf("cannot scan %v to netip.Addr", v)
+ }
+
+ *w = netipAddrWrapper(v.Addr())
+
+ return nil
+}
+
+func (w netipAddrWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ addr := (netip.Addr)(w)
+ if !addr.IsValid() {
+ return netip.Prefix{}, nil
+ }
+
+ return netip.PrefixFrom(addr, addr.BitLen()), nil
+}
+
+type mapStringToPointerStringWrapper map[string]*string
+
+func (w *mapStringToPointerStringWrapper) ScanHstore(v Hstore) error {
+ *w = mapStringToPointerStringWrapper(v)
+ return nil
+}
+
+func (w mapStringToPointerStringWrapper) HstoreValue() (Hstore, error) {
+ return Hstore(w), nil
+}
+
+type mapStringToStringWrapper map[string]string
+
+func (w *mapStringToStringWrapper) ScanHstore(v Hstore) error {
+ *w = make(mapStringToStringWrapper, len(v))
+ for k, v := range v {
+ if v == nil {
+ return fmt.Errorf("cannot scan NULL to string")
+ }
+ (*w)[k] = *v
+ }
+ return nil
+}
+
+func (w mapStringToStringWrapper) HstoreValue() (Hstore, error) {
+ if w == nil {
+ return nil, nil
+ }
+
+ hstore := make(Hstore, len(w))
+ for k, v := range w {
+ s := v
+ hstore[k] = &s
+ }
+ return hstore, nil
+}
+
+type fmtStringerWrapper struct {
+ s fmt.Stringer
+}
+
+func (w fmtStringerWrapper) TextValue() (Text, error) {
+ return Text{String: w.s.String(), Valid: true}, nil
+}
+
+type byte16Wrapper [16]byte
+
+func (w *byte16Wrapper) ScanUUID(v UUID) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *[16]byte")
+ }
+ *w = byte16Wrapper(v.Bytes)
+ return nil
+}
+
+func (w byte16Wrapper) UUIDValue() (UUID, error) {
+ return UUID{Bytes: [16]byte(w), Valid: true}, nil
+}
+
+type byteSliceWrapper []byte
+
+func (w byteSliceWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *byteSliceWrapper) ScanText(v Text) error {
+ if !v.Valid {
+ *w = nil
+ return nil
+ }
+
+ *w = byteSliceWrapper(v.String)
+ return nil
+}
+
+func (w byteSliceWrapper) TextValue() (Text, error) {
+ if w == nil {
+ return Text{}, nil
+ }
+
+ return Text{String: string(w), Valid: true}, nil
+}
+
+func (w *byteSliceWrapper) ScanUUID(v UUID) error {
+ if !v.Valid {
+ *w = nil
+ return nil
+ }
+ *w = make(byteSliceWrapper, 16)
+ copy(*w, v.Bytes[:])
+ return nil
+}
+
+func (w byteSliceWrapper) UUIDValue() (UUID, error) {
+ if w == nil {
+ return UUID{}, nil
+ }
+
+ uuid := UUID{Valid: true}
+ copy(uuid.Bytes[:], w)
+ return uuid, nil
+}
+
+// structWrapper implements CompositeIndexGetter for a struct.
+type structWrapper struct {
+ s any
+ exportedFields []reflect.Value
+}
+
+func (w structWrapper) IsNull() bool {
+ return w.s == nil
+}
+
+func (w structWrapper) Index(i int) any {
+ if i >= len(w.exportedFields) {
+ return fmt.Errorf("%#v only has %d public fields - %d is out of bounds", w.s, len(w.exportedFields), i)
+ }
+
+ return w.exportedFields[i].Interface()
+}
+
+// ptrStructWrapper implements CompositeIndexScanner for a pointer to a struct.
+type ptrStructWrapper struct {
+ s any
+ exportedFields []reflect.Value
+}
+
+func (w *ptrStructWrapper) ScanNull() error {
+ return fmt.Errorf("cannot scan NULL into %#v", w.s)
+}
+
+func (w *ptrStructWrapper) ScanIndex(i int) any {
+ if i >= len(w.exportedFields) {
+ return fmt.Errorf("%#v only has %d public fields - %d is out of bounds", w.s, len(w.exportedFields), i)
+ }
+
+ return w.exportedFields[i].Addr().Interface()
+}
+
+type anySliceArrayReflect struct {
+ slice reflect.Value
+}
+
+func (a anySliceArrayReflect) Dimensions() []ArrayDimension {
+ if a.slice.IsNil() {
+ return nil
+ }
+
+ return []ArrayDimension{{Length: int32(a.slice.Len()), LowerBound: 1}}
+}
+
+func (a anySliceArrayReflect) Index(i int) any {
+ return a.slice.Index(i).Interface()
+}
+
+func (a anySliceArrayReflect) IndexType() any {
+ return reflect.New(a.slice.Type().Elem()).Elem().Interface()
+}
+
+func (a *anySliceArrayReflect) SetDimensions(dimensions []ArrayDimension) error {
+ sliceType := a.slice.Type()
+
+ if dimensions == nil {
+ a.slice.Set(reflect.Zero(sliceType))
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ slice := reflect.MakeSlice(sliceType, elementCount, elementCount)
+ a.slice.Set(slice)
+ return nil
+}
+
+func (a *anySliceArrayReflect) ScanIndex(i int) any {
+ return a.slice.Index(i).Addr().Interface()
+}
+
+func (a *anySliceArrayReflect) ScanIndexType() any {
+ return reflect.New(a.slice.Type().Elem()).Interface()
+}
+
+type anyMultiDimSliceArray struct {
+ slice reflect.Value
+ dims []ArrayDimension
+}
+
+func (a *anyMultiDimSliceArray) Dimensions() []ArrayDimension {
+ if a.slice.IsNil() {
+ return nil
+ }
+
+ s := a.slice
+ for {
+ a.dims = append(a.dims, ArrayDimension{Length: int32(s.Len()), LowerBound: 1})
+ if s.Len() > 0 {
+ s = s.Index(0)
+ } else {
+ break
+ }
+ if s.Type().Kind() != reflect.Slice {
+ break
+ }
+ }
+
+ return a.dims
+}
+
+func (a *anyMultiDimSliceArray) Index(i int) any {
+ if len(a.dims) == 1 {
+ return a.slice.Index(i).Interface()
+ }
+
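+ // Convert the flat element index i into one index per dimension, working from the innermost (fastest-varying) dimension outward.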
+ indexes := make([]int, len(a.dims))
+ for j := len(a.dims) - 1; j >= 0; j-- {
+ dimLen := int(a.dims[j].Length)
+ indexes[j] = i % dimLen
+ i = i / dimLen
+ }
+
+ v := a.slice
+ for _, si := range indexes {
+ v = v.Index(si)
+ }
+
+ return v.Interface()
+}
+
+func (a *anyMultiDimSliceArray) IndexType() any {
+ lowestSliceType := a.slice.Type()
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ }
+ return reflect.New(lowestSliceType.Elem()).Elem().Interface()
+}
+
+func (a *anyMultiDimSliceArray) SetDimensions(dimensions []ArrayDimension) error {
+ sliceType := a.slice.Type()
+
+ if dimensions == nil {
+ a.slice.Set(reflect.Zero(sliceType))
+ return nil
+ }
+
+ switch len(dimensions) {
+ case 0:
+ // Empty, but non-nil array
+ slice := reflect.MakeSlice(sliceType, 0, 0)
+ a.slice.Set(slice)
+ return nil
+ case 1:
+ elementCount := cardinality(dimensions)
+ slice := reflect.MakeSlice(sliceType, elementCount, elementCount)
+ a.slice.Set(slice)
+ return nil
+ default:
+ sliceDimensionCount := 1
+ lowestSliceType := sliceType
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ sliceDimensionCount++
+ }
+
+ if sliceDimensionCount != len(dimensions) {
+ return fmt.Errorf("PostgreSQL array has %d dimensions but slice has %d dimensions", len(dimensions), sliceDimensionCount)
+ }
+
+ elementCount := cardinality(dimensions)
+ flatSlice := reflect.MakeSlice(lowestSliceType, elementCount, elementCount)
+
+ multiDimSlice := a.makeMultidimensionalSlice(sliceType, dimensions, flatSlice, 0)
+ a.slice.Set(multiDimSlice)
+
+ // Now that a.slice is a multi-dimensional slice with the underlying data pointed at flatSlice change a.slice to
+ // flatSlice so ScanIndex only has to handle simple one dimensional slices.
+ a.slice = flatSlice
+
+ return nil
+ }
+
+}
+
+func (a *anyMultiDimSliceArray) makeMultidimensionalSlice(sliceType reflect.Type, dimensions []ArrayDimension, flatSlice reflect.Value, flatSliceIdx int) reflect.Value {
+ if len(dimensions) == 1 {
+ endIdx := flatSliceIdx + int(dimensions[0].Length)
+ return flatSlice.Slice3(flatSliceIdx, endIdx, endIdx)
+ }
+
+ sliceLen := int(dimensions[0].Length)
+ slice := reflect.MakeSlice(sliceType, sliceLen, sliceLen)
+ for i := 0; i < sliceLen; i++ {
+ subSlice := a.makeMultidimensionalSlice(sliceType.Elem(), dimensions[1:], flatSlice, flatSliceIdx+(i*int(dimensions[1].Length)))
+ slice.Index(i).Set(subSlice)
+ }
+
+ return slice
+}
+
+func (a *anyMultiDimSliceArray) ScanIndex(i int) any {
+ return a.slice.Index(i).Addr().Interface()
+}
+
+func (a *anyMultiDimSliceArray) ScanIndexType() any {
+ lowestSliceType := a.slice.Type()
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ }
+ return reflect.New(lowestSliceType.Elem()).Interface()
+}
+
+type anyArrayArrayReflect struct {
+ array reflect.Value
+}
+
+func (a anyArrayArrayReflect) Dimensions() []ArrayDimension {
+ return []ArrayDimension{{Length: int32(a.array.Len()), LowerBound: 1}}
+}
+
+func (a anyArrayArrayReflect) Index(i int) any {
+ return a.array.Index(i).Interface()
+}
+
+func (a anyArrayArrayReflect) IndexType() any {
+ return reflect.New(a.array.Type().Elem()).Elem().Interface()
+}
+
+func (a *anyArrayArrayReflect) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan NULL into %v", a.array.Type().String())
+ }
+
+ if len(dimensions) != 1 {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan multi-dimensional array into %v", a.array.Type().String())
+ }
+
+ if int(dimensions[0].Length) != a.array.Len() {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan array with length %v into %v", dimensions[0].Length, a.array.Type().String())
+ }
+
+ return nil
+}
+
+func (a *anyArrayArrayReflect) ScanIndex(i int) any {
+ return a.array.Index(i).Addr().Interface()
+}
+
+func (a *anyArrayArrayReflect) ScanIndexType() any {
+ return reflect.New(a.array.Type().Elem()).Interface()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go b/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go
new file mode 100644
index 0000000..a247705
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go
@@ -0,0 +1,255 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+)
+
+type BytesScanner interface {
+ // ScanBytes receives a byte slice of driver memory that is only valid until the next database method call.
+ ScanBytes(v []byte) error
+}
+
+type BytesValuer interface {
+ // BytesValue returns a byte slice of the byte data. The caller must not change the returned slice.
+ BytesValue() ([]byte, error)
+}
+
+// DriverBytes is a byte slice that holds a reference to memory owned by the driver. It is only valid from the time it
+// is scanned until Rows.Next or Rows.Close is called. It is never safe to use DriverBytes with QueryRow as Row.Scan
+// internally calls Rows.Close before returning.
+type DriverBytes []byte
+
+func (b *DriverBytes) ScanBytes(v []byte) error {
+ *b = v
+ return nil
+}
+
+// PreallocBytes is a byte slice of preallocated memory that scanned bytes will be copied to. If it is too small a new
+// slice will be allocated.
+type PreallocBytes []byte
+
+func (b *PreallocBytes) ScanBytes(v []byte) error {
+ if v == nil {
+ *b = nil
+ return nil
+ }
+
+ if len(v) <= len(*b) {
+ *b = (*b)[:len(v)]
+ } else {
+ *b = make(PreallocBytes, len(v))
+ }
+ copy(*b, v)
+ return nil
+}
+
+// UndecodedBytes can be used as a scan target to get the raw bytes from PostgreSQL without any decoding.
+type UndecodedBytes []byte
+
+type scanPlanAnyToUndecodedBytes struct{}
+
+func (scanPlanAnyToUndecodedBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*UndecodedBytes)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type ByteaCodec struct{}
+
+func (ByteaCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (ByteaCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (ByteaCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case []byte:
+ return encodePlanBytesCodecBinaryBytes{}
+ case BytesValuer:
+ return encodePlanBytesCodecBinaryBytesValuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case []byte:
+ return encodePlanBytesCodecTextBytes{}
+ case BytesValuer:
+ return encodePlanBytesCodecTextBytesValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanBytesCodecBinaryBytes struct{}
+
+func (encodePlanBytesCodecBinaryBytes) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.([]byte)
+ if b == nil {
+ return nil, nil
+ }
+
+ return append(buf, b...), nil
+}
+
+type encodePlanBytesCodecBinaryBytesValuer struct{}
+
+func (encodePlanBytesCodecBinaryBytesValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BytesValuer).BytesValue()
+ if err != nil {
+ return nil, err
+ }
+ if b == nil {
+ return nil, nil
+ }
+
+ return append(buf, b...), nil
+}
+
+type encodePlanBytesCodecTextBytes struct{}
+
+func (encodePlanBytesCodecTextBytes) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.([]byte)
+ if b == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(b)...)
+ return buf, nil
+}
+
+type encodePlanBytesCodecTextBytesValuer struct{}
+
+func (encodePlanBytesCodecTextBytesValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BytesValuer).BytesValue()
+ if err != nil {
+ return nil, err
+ }
+ if b == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(b)...)
+ return buf, nil
+}
+
+func (ByteaCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *[]byte:
+ return scanPlanBinaryBytesToBytes{}
+ case BytesScanner:
+ return scanPlanBinaryBytesToBytesScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *[]byte:
+ return scanPlanTextByteaToBytes{}
+ case BytesScanner:
+ return scanPlanTextByteaToBytesScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryBytesToBytes struct{}
+
+func (scanPlanBinaryBytesToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanBinaryBytesToBytesScanner struct{}
+
+func (scanPlanBinaryBytesToBytesScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BytesScanner)
+ return scanner.ScanBytes(src)
+}
+
+type scanPlanTextByteaToBytes struct{}
+
+func (scanPlanTextByteaToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ buf, err := decodeHexBytea(src)
+ if err != nil {
+ return err
+ }
+ *dstBuf = buf
+
+ return nil
+}
+
+type scanPlanTextByteaToBytesScanner struct{}
+
+func (scanPlanTextByteaToBytesScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BytesScanner)
+ buf, err := decodeHexBytea(src)
+ if err != nil {
+ return err
+ }
+ return scanner.ScanBytes(buf)
+}
+
+func decodeHexBytea(src []byte) ([]byte, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if len(src) < 2 || src[0] != '\\' || src[1] != 'x' {
+ return nil, fmt.Errorf("invalid hex format")
+ }
+
+ buf := make([]byte, (len(src)-2)/2)
+ _, err := hex.Decode(buf, src[2:])
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+func (c ByteaCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c ByteaCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var buf []byte
+ err := codecScan(c, m, oid, format, src, &buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/circle.go b/vendor/github.com/jackc/pgx/v5/pgtype/circle.go
new file mode 100644
index 0000000..e8f118c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/circle.go
@@ -0,0 +1,222 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CircleScanner interface {
+ ScanCircle(v Circle) error
+}
+
+type CircleValuer interface {
+ CircleValue() (Circle, error)
+}
+
+type Circle struct {
+ P Vec2
+ R float64
+ Valid bool
+}
+
+func (c *Circle) ScanCircle(v Circle) error {
+ *c = v
+ return nil
+}
+
+func (c Circle) CircleValue() (Circle, error) {
+ return c, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Circle) Scan(src any) error {
+ if src == nil {
+ *dst = Circle{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToCircleScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Circle) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := CircleCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type CircleCodec struct{}
+
+func (CircleCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (CircleCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (CircleCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(CircleValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanCircleCodecBinary{}
+ case TextFormatCode:
+ return encodePlanCircleCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanCircleCodecBinary struct{}
+
+func (encodePlanCircleCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ circle, err := value.(CircleValuer).CircleValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !circle.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.P.Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.R))
+ return buf, nil
+}
+
+type encodePlanCircleCodecText struct{}
+
+func (encodePlanCircleCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ circle, err := value.(CircleValuer).CircleValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !circle.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`<(%s,%s),%s>`,
+ strconv.FormatFloat(circle.P.X, 'f', -1, 64),
+ strconv.FormatFloat(circle.P.Y, 'f', -1, 64),
+ strconv.FormatFloat(circle.R, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (CircleCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case CircleScanner:
+ return scanPlanBinaryCircleToCircleScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case CircleScanner:
+ return scanPlanTextAnyToCircleScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c CircleCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c CircleCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var circle Circle
+ err := codecScan(c, m, oid, format, src, &circle)
+ if err != nil {
+ return nil, err
+ }
+ return circle, nil
+}
+
+type scanPlanBinaryCircleToCircleScanner struct{}
+
+func (scanPlanBinaryCircleToCircleScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(CircleScanner)
+
+ if src == nil {
+ return scanner.ScanCircle(Circle{})
+ }
+
+ if len(src) != 24 {
+ return fmt.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+ r := binary.BigEndian.Uint64(src[16:])
+
+ return scanner.ScanCircle(Circle{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ R: math.Float64frombits(r),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToCircleScanner struct{}
+
+func (scanPlanTextAnyToCircleScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(CircleScanner)
+
+ if src == nil {
+ return scanner.ScanCircle(Circle{})
+ }
+
+ if len(src) < 9 {
+ return fmt.Errorf("invalid length for Circle: %v", len(src))
+ }
+
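+ // The text representation is "<(x,y),r>"; drop the leading "<(" and parse x, y, and r in turn.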
+ str := string(src[2:])
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+2 : len(str)-1]
+
+ r, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanCircle(Circle{P: Vec2{x, y}, R: r, Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/composite.go b/vendor/github.com/jackc/pgx/v5/pgtype/composite.go
new file mode 100644
index 0000000..fb37232
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/composite.go
@@ -0,0 +1,602 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// CompositeIndexGetter is a type accessed by index that can be converted into a PostgreSQL composite.
+type CompositeIndexGetter interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // Index returns the element at i.
+ Index(i int) any
+}
+
+// CompositeIndexScanner is a type accessed by index that can be scanned from a PostgreSQL composite.
+type CompositeIndexScanner interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // ScanIndex returns a value usable as a scan target for i.
+ ScanIndex(i int) any
+}
+
+type CompositeCodecField struct {
+ Name string
+ Type *Type
+}
+
+type CompositeCodec struct {
+ Fields []CompositeCodecField
+}
+
+func (c *CompositeCodec) FormatSupported(format int16) bool {
+ for _, f := range c.Fields {
+ if !f.Type.Codec.FormatSupported(format) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *CompositeCodec) PreferredFormat() int16 {
+ if c.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *CompositeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(CompositeIndexGetter); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanCompositeCodecCompositeIndexGetterToBinary{cc: c, m: m}
+ case TextFormatCode:
+ return &encodePlanCompositeCodecCompositeIndexGetterToText{cc: c, m: m}
+ }
+
+ return nil
+}
+
+type encodePlanCompositeCodecCompositeIndexGetterToBinary struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *encodePlanCompositeCodecCompositeIndexGetterToBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(CompositeIndexGetter)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ builder := NewCompositeBinaryBuilder(plan.m, buf)
+ for i, field := range plan.cc.Fields {
+ builder.AppendValue(field.Type.OID, getter.Index(i))
+ }
+
+ return builder.Finish()
+}
+
+type encodePlanCompositeCodecCompositeIndexGetterToText struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *encodePlanCompositeCodecCompositeIndexGetterToText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(CompositeIndexGetter)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ b := NewCompositeTextBuilder(plan.m, buf)
+ for i, field := range plan.cc.Fields {
+ b.AppendValue(field.Type.OID, getter.Index(i))
+ }
+
+ return b.Finish()
+}
+
+func (c *CompositeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanBinaryCompositeToCompositeIndexScanner{cc: c, m: m}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanTextCompositeToCompositeIndexScanner{cc: c, m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryCompositeToCompositeIndexScanner struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *scanPlanBinaryCompositeToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeBinaryScanner(plan.m, src)
+ for i, field := range plan.cc.Fields {
+ if scanner.Next() {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(field.Type.OID, BinaryFormatCode, fieldTarget)
+ if fieldPlan == nil {
+ return fmt.Errorf("unable to encode %v into OID %d in binary format", field, field.Type.OID)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New("read past end of composite")
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type scanPlanTextCompositeToCompositeIndexScanner struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *scanPlanTextCompositeToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeTextScanner(plan.m, src)
+ for i, field := range plan.cc.Fields {
+ if scanner.Next() {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(field.Type.OID, TextFormatCode, fieldTarget)
+ if fieldPlan == nil {
+ return fmt.Errorf("unable to encode %v into OID %d in text format", field, field.Type.OID)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New("read past end of composite")
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *CompositeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *CompositeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ scanner := NewCompositeTextScanner(m, src)
+ values := make(map[string]any, len(c.Fields))
+ for i := 0; scanner.Next() && i < len(c.Fields); i++ {
+ var v any
+ fieldPlan := m.PlanScan(c.Fields[i].Type.OID, TextFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in text format into %v", c.Fields[i].Type.OID, v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[c.Fields[i].Name] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ case BinaryFormatCode:
+ scanner := NewCompositeBinaryScanner(m, src)
+ values := make(map[string]any, len(c.Fields))
+ for i := 0; scanner.Next() && i < len(c.Fields); i++ {
+ var v any
+ fieldPlan := m.PlanScan(scanner.OID(), BinaryFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[c.Fields[i].Name] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+
+}
+
+type CompositeBinaryScanner struct {
+ m *Map
+ rp int
+ src []byte
+
+ fieldCount int32
+ fieldBytes []byte
+ fieldOID uint32
+ err error
+}
+
+// NewCompositeBinaryScanner creates a scanner over a binary encoded composite value.
+func NewCompositeBinaryScanner(m *Map, src []byte) *CompositeBinaryScanner {
+ rp := 0
+ if len(src[rp:]) < 4 {
+ return &CompositeBinaryScanner{err: fmt.Errorf("Record incomplete %v", src)}
+ }
+
+ fieldCount := int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ return &CompositeBinaryScanner{
+ m: m,
+ rp: rp,
+ src: src,
+ fieldCount: fieldCount,
+ }
+}
+
+// Next advances the scanner to the next field. It returns false after the last field is read or an error occurs. After
+// Next returns false, the Err method can be called to check if any errors occurred.
+func (cfs *CompositeBinaryScanner) Next() bool {
+ if cfs.err != nil {
+ return false
+ }
+
+ if cfs.rp == len(cfs.src) {
+ return false
+ }
+
+ if len(cfs.src[cfs.rp:]) < 8 {
+ cfs.err = fmt.Errorf("Record incomplete %v", cfs.src)
+ return false
+ }
+ cfs.fieldOID = binary.BigEndian.Uint32(cfs.src[cfs.rp:])
+ cfs.rp += 4
+
+ fieldLen := int(int32(binary.BigEndian.Uint32(cfs.src[cfs.rp:])))
+ cfs.rp += 4
+
+ if fieldLen >= 0 {
+ if len(cfs.src[cfs.rp:]) < fieldLen {
+ cfs.err = fmt.Errorf("Record incomplete rp=%d src=%v", cfs.rp, cfs.src)
+ return false
+ }
+ cfs.fieldBytes = cfs.src[cfs.rp : cfs.rp+fieldLen]
+ cfs.rp += fieldLen
+ } else {
+ cfs.fieldBytes = nil
+ }
+
+ return true
+}
+
+func (cfs *CompositeBinaryScanner) FieldCount() int {
+ return int(cfs.fieldCount)
+}
+
+// Bytes returns the bytes of the field most recently read by Next().
+func (cfs *CompositeBinaryScanner) Bytes() []byte {
+ return cfs.fieldBytes
+}
+
+// OID returns the OID of the field most recently read by Next().
+func (cfs *CompositeBinaryScanner) OID() uint32 {
+ return cfs.fieldOID
+}
+
+// Err returns any error encountered by the scanner.
+func (cfs *CompositeBinaryScanner) Err() error {
+ return cfs.err
+}
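+
+// A minimal usage sketch from a caller's perspective (illustrative, not part of the
+// upstream documentation): m is assumed to be a *pgtype.Map and src a binary-encoded
+// composite value obtained from a query result.
+//
+//	scanner := pgtype.NewCompositeBinaryScanner(m, src)
+//	for scanner.Next() {
+//		fmt.Printf("field OID %d: %v\n", scanner.OID(), scanner.Bytes())
+//	}
+//	if err := scanner.Err(); err != nil {
+//		// the record was truncated or otherwise malformed
+//	}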
+
+type CompositeTextScanner struct {
+ m *Map
+ rp int
+ src []byte
+
+ fieldBytes []byte
+ err error
+}
+
+// NewCompositeTextScanner creates a scanner over a text encoded composite value.
+func NewCompositeTextScanner(m *Map, src []byte) *CompositeTextScanner {
+ if len(src) < 2 {
+ return &CompositeTextScanner{err: fmt.Errorf("Record incomplete %v", src)}
+ }
+
+ if src[0] != '(' {
+ return &CompositeTextScanner{err: fmt.Errorf("composite text format must start with '('")}
+ }
+
+ if src[len(src)-1] != ')' {
+ return &CompositeTextScanner{err: fmt.Errorf("composite text format must end with ')'")}
+ }
+
+ return &CompositeTextScanner{
+ m: m,
+ rp: 1,
+ src: src,
+ }
+}
+
+// Next advances the scanner to the next field. It returns false after the last field is read or an error occurs. After
+// Next returns false, the Err method can be called to check if any errors occurred.
+func (cfs *CompositeTextScanner) Next() bool {
+ if cfs.err != nil {
+ return false
+ }
+
+ if cfs.rp == len(cfs.src) {
+ return false
+ }
+
+ switch cfs.src[cfs.rp] {
+ case ',', ')': // null
+ cfs.rp++
+ cfs.fieldBytes = nil
+ return true
+ case '"': // quoted value
+ cfs.rp++
+ cfs.fieldBytes = make([]byte, 0, 16)
+ for {
+ ch := cfs.src[cfs.rp]
+
+ if ch == '"' {
+ cfs.rp++
+ if cfs.src[cfs.rp] == '"' {
+ cfs.fieldBytes = append(cfs.fieldBytes, '"')
+ cfs.rp++
+ } else {
+ break
+ }
+ } else if ch == '\\' {
+ cfs.rp++
+ cfs.fieldBytes = append(cfs.fieldBytes, cfs.src[cfs.rp])
+ cfs.rp++
+ } else {
+ cfs.fieldBytes = append(cfs.fieldBytes, ch)
+ cfs.rp++
+ }
+ }
+ cfs.rp++
+ return true
+ default: // unquoted value
+ start := cfs.rp
+ for {
+ ch := cfs.src[cfs.rp]
+ if ch == ',' || ch == ')' {
+ break
+ }
+ cfs.rp++
+ }
+ cfs.fieldBytes = cfs.src[start:cfs.rp]
+ cfs.rp++
+ return true
+ }
+}
+
+// Bytes returns the bytes of the field most recently read by Next().
+func (cfs *CompositeTextScanner) Bytes() []byte {
+ return cfs.fieldBytes
+}
+
+// Err returns any error encountered by the scanner.
+func (cfs *CompositeTextScanner) Err() error {
+ return cfs.err
+}
+
+type CompositeBinaryBuilder struct {
+ m *Map
+ buf []byte
+ startIdx int
+ fieldCount uint32
+ err error
+}
+
+func NewCompositeBinaryBuilder(m *Map, buf []byte) *CompositeBinaryBuilder {
+ startIdx := len(buf)
+ buf = append(buf, 0, 0, 0, 0) // allocate room for number of fields
+ return &CompositeBinaryBuilder{m: m, buf: buf, startIdx: startIdx}
+}
+
+func (b *CompositeBinaryBuilder) AppendValue(oid uint32, field any) {
+ if b.err != nil {
+ return
+ }
+
+ if field == nil {
+ b.buf = pgio.AppendUint32(b.buf, oid)
+ b.buf = pgio.AppendInt32(b.buf, -1)
+ b.fieldCount++
+ return
+ }
+
+ plan := b.m.PlanEncode(oid, BinaryFormatCode, field)
+ if plan == nil {
+ b.err = fmt.Errorf("unable to encode %v into OID %d in binary format", field, oid)
+ return
+ }
+
+ b.buf = pgio.AppendUint32(b.buf, oid)
+ lengthPos := len(b.buf)
+ b.buf = pgio.AppendInt32(b.buf, -1)
+ fieldBuf, err := plan.Encode(field, b.buf)
+ if err != nil {
+ b.err = err
+ return
+ }
+ if fieldBuf != nil {
+ binary.BigEndian.PutUint32(fieldBuf[lengthPos:], uint32(len(fieldBuf)-len(b.buf)))
+ b.buf = fieldBuf
+ }
+
+ b.fieldCount++
+}
+
+func (b *CompositeBinaryBuilder) Finish() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+
+ binary.BigEndian.PutUint32(b.buf[b.startIdx:], b.fieldCount)
+ return b.buf, nil
+}
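+
+// A minimal usage sketch from a caller's perspective; m is assumed to be a *pgtype.Map
+// and the OIDs must match the composite's declared field types. The returned buf holds
+// the binary wire representation of the composite.
+//
+//	b := pgtype.NewCompositeBinaryBuilder(m, nil)
+//	b.AppendValue(pgtype.TextOID, "jane")
+//	b.AppendValue(pgtype.Int4OID, int32(30))
+//	buf, err := b.Finish()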
+
+type CompositeTextBuilder struct {
+ m *Map
+ buf []byte
+ startIdx int
+ fieldCount uint32
+ err error
+ fieldBuf [32]byte
+}
+
+func NewCompositeTextBuilder(m *Map, buf []byte) *CompositeTextBuilder {
+ buf = append(buf, '(') // composite text format opens with '('
+ return &CompositeTextBuilder{m: m, buf: buf}
+}
+
+func (b *CompositeTextBuilder) AppendValue(oid uint32, field any) {
+ if b.err != nil {
+ return
+ }
+
+ if field == nil {
+ b.buf = append(b.buf, ',')
+ return
+ }
+
+ plan := b.m.PlanEncode(oid, TextFormatCode, field)
+ if plan == nil {
+ b.err = fmt.Errorf("unable to encode %v into OID %d in text format", field, oid)
+ return
+ }
+
+ fieldBuf, err := plan.Encode(field, b.fieldBuf[0:0])
+ if err != nil {
+ b.err = err
+ return
+ }
+ if fieldBuf != nil {
+ b.buf = append(b.buf, quoteCompositeFieldIfNeeded(string(fieldBuf))...)
+ }
+
+ b.buf = append(b.buf, ',')
+}
+
+func (b *CompositeTextBuilder) Finish() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+
+ b.buf[len(b.buf)-1] = ')'
+ return b.buf, nil
+}
+
+var quoteCompositeReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
+
+func quoteCompositeField(src string) string {
+ return `"` + quoteCompositeReplacer.Replace(src) + `"`
+}
+
+func quoteCompositeFieldIfNeeded(src string) string {
+ if src == "" || src[0] == ' ' || src[len(src)-1] == ' ' || strings.ContainsAny(src, `(),"\`) {
+ return quoteCompositeField(src)
+ }
+ return src
+}
+
+// CompositeFields represents the values of a composite value. It can be used as an encoding source or as a scan target.
+// It cannot scan a NULL, but the composite fields can be NULL.
+type CompositeFields []any
+
+func (cf CompositeFields) SkipUnderlyingTypePlan() {}
+
+func (cf CompositeFields) IsNull() bool {
+ return cf == nil
+}
+
+func (cf CompositeFields) Index(i int) any {
+ return cf[i]
+}
+
+func (cf CompositeFields) ScanNull() error {
+ return fmt.Errorf("cannot scan NULL into CompositeFields")
+}
+
+func (cf CompositeFields) ScanIndex(i int) any {
+ return cf[i]
+}
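+
+// A minimal usage sketch from a caller's perspective, assuming the composite type has an
+// int4 field followed by a text field and has been registered with the connection's type
+// map; conn and ctx are illustrative. Each element must be a pointer that the corresponding
+// field is scanned into.
+//
+//	var id int32
+//	var name string
+//	err := conn.QueryRow(ctx, "select my_composite from t").Scan(pgtype.CompositeFields{&id, &name})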
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/convert.go b/vendor/github.com/jackc/pgx/v5/pgtype/convert.go
new file mode 100644
index 0000000..8a9cee9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/convert.go
@@ -0,0 +1,108 @@
+package pgtype
+
+import (
+ "reflect"
+)
+
+func NullAssignTo(dst any) error {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return &nullAssignmentError{dst: dst}
+ }
+
+ dstVal := dstPtr.Elem()
+
+ switch dstVal.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+ return nil
+ }
+
+ return &nullAssignmentError{dst: dst}
+}
+
+var kindTypes map[reflect.Kind]reflect.Type
+
+func toInterface(dst reflect.Value, t reflect.Type) (any, bool) {
+ nextDst := dst.Convert(t)
+ return nextDst.Interface(), dst.Type() != nextDst.Type()
+}
+
+// GetAssignToDstType attempts to convert dst to something AssignTo can assign
+// to. If dst is a pointer to pointer it allocates a value and returns the
+// dereferenced pointer. If dst is a named type such as *Foo where Foo is type
+// Foo int16, it converts dst to *int16.
+//
+// GetAssignToDstType returns the converted dst and a bool representing if any
+// change was made.
+func GetAssignToDstType(dst any) (any, bool) {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return nil, false
+ }
+
+ dstVal := dstPtr.Elem()
+
+ // if dst is a pointer to pointer, allocate space and try again with the dereferenced pointer
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal.Set(reflect.New(dstVal.Type().Elem()))
+ return dstVal.Interface(), true
+ }
+
+ // if dst is pointer to a base type that has been renamed
+ if baseValType, ok := kindTypes[dstVal.Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(baseValType))
+ }
+
+ if dstVal.Kind() == reflect.Slice {
+ if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.SliceOf(baseElemType)))
+ }
+ }
+
+ if dstVal.Kind() == reflect.Array {
+ if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(dstVal.Len(), baseElemType)))
+ }
+ }
+
+ if dstVal.Kind() == reflect.Struct {
+ if dstVal.Type().NumField() == 1 && dstVal.Type().Field(0).Anonymous {
+ dstPtr = dstVal.Field(0).Addr()
+ nested := dstVal.Type().Field(0).Type
+ if nested.Kind() == reflect.Array {
+ if baseElemType, ok := kindTypes[nested.Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(nested.Len(), baseElemType)))
+ }
+ }
+ if _, ok := kindTypes[nested.Kind()]; ok && dstPtr.CanInterface() {
+ return dstPtr.Interface(), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func init() {
+ kindTypes = map[reflect.Kind]reflect.Type{
+ reflect.Bool: reflect.TypeOf(false),
+ reflect.Float32: reflect.TypeOf(float32(0)),
+ reflect.Float64: reflect.TypeOf(float64(0)),
+ reflect.Int: reflect.TypeOf(int(0)),
+ reflect.Int8: reflect.TypeOf(int8(0)),
+ reflect.Int16: reflect.TypeOf(int16(0)),
+ reflect.Int32: reflect.TypeOf(int32(0)),
+ reflect.Int64: reflect.TypeOf(int64(0)),
+ reflect.Uint: reflect.TypeOf(uint(0)),
+ reflect.Uint8: reflect.TypeOf(uint8(0)),
+ reflect.Uint16: reflect.TypeOf(uint16(0)),
+ reflect.Uint32: reflect.TypeOf(uint32(0)),
+ reflect.Uint64: reflect.TypeOf(uint64(0)),
+ reflect.String: reflect.TypeOf(""),
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/date.go b/vendor/github.com/jackc/pgx/v5/pgtype/date.go
new file mode 100644
index 0000000..784b16d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/date.go
@@ -0,0 +1,351 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type DateScanner interface {
+ ScanDate(v Date) error
+}
+
+type DateValuer interface {
+ DateValue() (Date, error)
+}
+
+type Date struct {
+ Time time.Time
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (d *Date) ScanDate(v Date) error {
+ *d = v
+ return nil
+}
+
+func (d Date) DateValue() (Date, error) {
+ return d, nil
+}
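+
+// A minimal usage sketch; conn and ctx are illustrative. A Date can be constructed
+// directly or scanned from a query result.
+//
+//	d := pgtype.Date{Time: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC), Valid: true}
+//	var out pgtype.Date
+//	err := conn.QueryRow(ctx, "select $1::date", d).Scan(&out)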
+
+const (
+ negativeInfinityDayOffset = -2147483648
+ infinityDayOffset = 2147483647
+)
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Date) Scan(src any) error {
+ if src == nil {
+ *dst = Date{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToDateScanner{}.Scan([]byte(src), dst)
+ case time.Time:
+ *dst = Date{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Date) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ if src.InfinityModifier != Finite {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+}
+
+func (src Date) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch src.InfinityModifier {
+ case Finite:
+ s = src.Time.Format("2006-01-02")
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (dst *Date) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *dst = Date{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *dst = Date{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Date{Valid: true, InfinityModifier: -Infinity}
+ default:
+ t, err := time.ParseInLocation("2006-01-02", *s, time.UTC)
+ if err != nil {
+ return err
+ }
+
+ *dst = Date{Time: t, Valid: true}
+ }
+
+ return nil
+}
+
+type DateCodec struct{}
+
+func (DateCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (DateCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (DateCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(DateValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanDateCodecBinary{}
+ case TextFormatCode:
+ return encodePlanDateCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanDateCodecBinary struct{}
+
+func (encodePlanDateCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ date, err := value.(DateValuer).DateValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !date.Valid {
+ return nil, nil
+ }
+
+ var daysSinceDateEpoch int32
+ switch date.InfinityModifier {
+ case Finite:
+ tUnix := time.Date(date.Time.Year(), date.Time.Month(), date.Time.Day(), 0, 0, 0, 0, time.UTC).Unix()
+ dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
+
+ secSinceDateEpoch := tUnix - dateEpoch
+ daysSinceDateEpoch = int32(secSinceDateEpoch / 86400)
+ case Infinity:
+ daysSinceDateEpoch = infinityDayOffset
+ case NegativeInfinity:
+ daysSinceDateEpoch = negativeInfinityDayOffset
+ }
+
+ return pgio.AppendInt32(buf, daysSinceDateEpoch), nil
+}
+
+type encodePlanDateCodecText struct{}
+
+func (encodePlanDateCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ date, err := value.(DateValuer).DateValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !date.Valid {
+ return nil, nil
+ }
+
+ switch date.InfinityModifier {
+ case Finite:
+ // Year 0000 is 1 BC
+ bc := false
+ year := date.Time.Year()
+ if year <= 0 {
+ year = -year + 1
+ bc = true
+ }
+
+ yearBytes := strconv.AppendInt(make([]byte, 0, 6), int64(year), 10)
+ for i := len(yearBytes); i < 4; i++ {
+ buf = append(buf, '0')
+ }
+ buf = append(buf, yearBytes...)
+ buf = append(buf, '-')
+ if date.Time.Month() < 10 {
+ buf = append(buf, '0')
+ }
+ buf = strconv.AppendInt(buf, int64(date.Time.Month()), 10)
+ buf = append(buf, '-')
+ if date.Time.Day() < 10 {
+ buf = append(buf, '0')
+ }
+ buf = strconv.AppendInt(buf, int64(date.Time.Day()), 10)
+
+ if bc {
+ buf = append(buf, " BC"...)
+ }
+ case Infinity:
+ buf = append(buf, "infinity"...)
+ case NegativeInfinity:
+ buf = append(buf, "-infinity"...)
+ }
+
+ return buf, nil
+}
+
+func (DateCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case DateScanner:
+ return scanPlanBinaryDateToDateScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case DateScanner:
+ return scanPlanTextAnyToDateScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryDateToDateScanner struct{}
+
+func (scanPlanBinaryDateToDateScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(DateScanner)
+
+ if src == nil {
+ return scanner.ScanDate(Date{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for date: %v", len(src))
+ }
+
+ dayOffset := int32(binary.BigEndian.Uint32(src))
+
+ switch dayOffset {
+ case infinityDayOffset:
+ return scanner.ScanDate(Date{InfinityModifier: Infinity, Valid: true})
+ case negativeInfinityDayOffset:
+ return scanner.ScanDate(Date{InfinityModifier: -Infinity, Valid: true})
+ default:
+ t := time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.UTC)
+ return scanner.ScanDate(Date{Time: t, Valid: true})
+ }
+}
+
+type scanPlanTextAnyToDateScanner struct{}
+
+var dateRegexp = regexp.MustCompile(`^(\d{4,})-(\d\d)-(\d\d)( BC)?$`)
+
+func (scanPlanTextAnyToDateScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(DateScanner)
+
+ if src == nil {
+ return scanner.ScanDate(Date{})
+ }
+
+ sbuf := string(src)
+ match := dateRegexp.FindStringSubmatch(sbuf)
+ if match != nil {
+ year, err := strconv.ParseInt(match[1], 10, 32)
+ if err != nil {
+ return fmt.Errorf("BUG: cannot parse date that regexp matched (year): %w", err)
+ }
+
+ month, err := strconv.ParseInt(match[2], 10, 32)
+ if err != nil {
+ return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %w", err)
+ }
+
+ day, err := strconv.ParseInt(match[3], 10, 32)
+ if err != nil {
+ return fmt.Errorf("BUG: cannot parse date that regexp matched (day): %w", err)
+ }
+
+ // BC matched
+ if len(match[4]) > 0 {
+ year = -year + 1
+ }
+
+ t := time.Date(int(year), time.Month(month), int(day), 0, 0, 0, 0, time.UTC)
+ return scanner.ScanDate(Date{Time: t, Valid: true})
+ }
+
+ switch sbuf {
+ case "infinity":
+ return scanner.ScanDate(Date{InfinityModifier: Infinity, Valid: true})
+ case "-infinity":
+ return scanner.ScanDate(Date{InfinityModifier: -Infinity, Valid: true})
+ default:
+ return fmt.Errorf("invalid date format")
+ }
+}
+
+func (c DateCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var date Date
+ err := codecScan(c, m, oid, format, src, &date)
+ if err != nil {
+ return nil, err
+ }
+
+ if date.InfinityModifier != Finite {
+ return date.InfinityModifier.String(), nil
+ }
+
+ return date.Time, nil
+}
+
+func (c DateCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var date Date
+ err := codecScan(c, m, oid, format, src, &date)
+ if err != nil {
+ return nil, err
+ }
+
+ if date.InfinityModifier != Finite {
+ return date.InfinityModifier, nil
+ }
+
+ return date.Time, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
new file mode 100644
index 0000000..83dfc5d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
@@ -0,0 +1,196 @@
+// Package pgtype converts between Go and PostgreSQL values.
+/*
+The primary type is the Map type. It is a map of PostgreSQL types identified by OID (object ID) to a Codec. A Codec is
+responsible for converting between Go and PostgreSQL values. NewMap creates a Map with all supported standard PostgreSQL
+types already registered. Additional types can be registered with Map.RegisterType.
+
+Use Map.Scan and Map.Encode to decode PostgreSQL values to Go and encode Go values to PostgreSQL respectively.
+
+Base Type Mapping
+
+pgtype maps between all common base types directly between Go and PostgreSQL. In particular:
+
+ Go PostgreSQL
+ -----------------------
+ string varchar
+ text
+
+ // Integers are automatically converted to any other integer type if
+ // it can be done without overflow or underflow.
+ int8
+ int16 smallint
+ int32 int
+ int64 bigint
+ int
+ uint8
+ uint16
+ uint32
+ uint64
+ uint
+
+ // Floats are strict and do not automatically convert like integers.
+ float32 float4
+ float64 float8
+
+ time.Time date
+ timestamp
+ timestamptz
+
+ netip.Addr inet
+ netip.Prefix cidr
+
+ []byte bytea
+
+Null Values
+
+pgtype can map NULLs in two ways. The first is types that can directly represent NULL such as Int4. They work in a
+similar fashion to database/sql. The second is to use a pointer to a pointer.
+
+ var foo pgtype.Text
+ var bar *string
+ err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&foo, &bar)
+ if err != nil {
+ return err
+ }
+
+When using nullable pgtype types as parameters for queries, one has to remember to explicitly set their Valid field to
+true, otherwise the parameter's value will be NULL.
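+
+For example, a minimal sketch (the widgets table and parameter are illustrative only):
+
+ _, err := conn.Exec(ctx, "insert into widgets(name) values ($1)", pgtype.Text{String: "foo", Valid: true})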
+
+JSON Support
+
+pgtype automatically marshals and unmarshals data from json and jsonb PostgreSQL types.
+
+Extending Existing PostgreSQL Type Support
+
+Generally, all Codecs will support interfaces that can be implemented to enable scanning and encoding. For example,
+PointCodec can use any Go type that implements the PointScanner and PointValuer interfaces. So rather than use
+pgtype.Point, an application can directly use its own point type with pgtype as long as it implements those interfaces.
+
+See example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.
+
+Sometimes pgx supports a PostgreSQL type such as numeric but the Go type is in an external package that does not have
+pgx support such as github.com/shopspring/decimal. These types can be registered with pgtype with custom conversion
+logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for example
+integrations.
+
+New PostgreSQL Type Support
+
+pgtype uses the PostgreSQL OID to determine how to encode or decode a value. pgtype supports array, composite, domain,
+and enum types. However, any type created in PostgreSQL with CREATE TYPE will receive a new OID. This means that the OID
+of each new PostgreSQL type must be registered for pgtype to handle values of that type with the correct Codec.
+
+The pgx.Conn LoadType method can return a *Type for array, composite, domain, and enum types by inspecting the database
+metadata. This *Type can then be registered with Map.RegisterType.
+
+For example, the following function could be called after a connection is established:
+
+ func RegisterDataTypes(ctx context.Context, conn *pgx.Conn) error {
+ dataTypeNames := []string{
+ "foo",
+ "_foo",
+ "bar",
+ "_bar",
+ }
+
+ for _, typeName := range dataTypeNames {
+ dataType, err := conn.LoadType(ctx, typeName)
+ if err != nil {
+ return err
+ }
+ conn.TypeMap().RegisterType(dataType)
+ }
+
+ return nil
+ }
+
+A type cannot be registered unless all types it depends on are already registered. e.g. An array type cannot be
+registered until its element type is registered.
+
+ArrayCodec implements support for arrays. If pgtype supports type T then it can easily support []T by registering an
+ArrayCodec for the appropriate PostgreSQL OID. In addition, Array[T] type can support multi-dimensional arrays.
+
+CompositeCodec implements support for PostgreSQL composite types. Go structs can be scanned into if the public fields of
+the struct are in the exact order and type of the PostgreSQL type or by implementing CompositeIndexScanner and
+CompositeIndexGetter.
+
+Domain types are treated as their underlying type if the underlying type and the domain type are registered.
+
+PostgreSQL enums can usually be treated as text. However, EnumCodec implements support for interning strings which can
+reduce memory usage.
+
+While pgtype will often still work with unregistered types it is highly recommended that all types be registered due to
+an improvement in performance and the elimination of certain edge cases.
+
+If an entirely new PostgreSQL type (e.g. PostGIS types) is used then the application or a library can create a new
+Codec. Then the OID / Codec mapping can be registered with Map.RegisterType. There is no difference between a Codec
+defined and registered by the application and a Codec built in to pgtype. See any of the Codecs in pgtype for Codec
+examples and for examples of type registration.
+
+Encoding Unknown Types
+
+pgtype works best when the OID of the PostgreSQL type is known. But in some cases such as using the simple protocol the
+OID is unknown. In this case Map.RegisterDefaultPgType can be used to register an assumed OID for a particular Go type.
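+
+For example, a minimal sketch for a renamed integer type used as a query parameter with the simple protocol:
+
+ type UserID int64
+
+ m := conn.TypeMap()
+ m.RegisterDefaultPgType(UserID(0), "int8")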
+
+Renamed Types
+
+If pgtype does not recognize a type and that type is a renamed simple type (e.g. type MyInt32 int32) pgtype acts
+as if it is the underlying type. It currently cannot automatically detect the underlying type of renamed structs (e.g.
+type MyTime time.Time).
+
+Compatibility with database/sql
+
+pgtype also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer
+interfaces.
+
+Encoding Typed Nils
+
+pgtype encodes untyped and typed nils (e.g. nil and []byte(nil)) to the SQL NULL value without going through the Codec
+system. This means that Codecs and other encoding logic do not have to handle nil or *T(nil).
+
+However, database/sql compatibility requires Value to be called on T(nil) when T implements driver.Valuer. Therefore,
+driver.Valuer values are only considered NULL when *T(nil) where driver.Valuer is implemented on T not on *T. See
+https://github.com/golang/go/issues/8415 and
+https://github.com/golang/go/commit/0ce1d79a6a771f7449ec493b993ed2a720917870.
+
+Child Records
+
+pgtype's support for arrays and composite records can be used to load records and their children in a single query. See
+example_child_records_test.go for an example.
+
+Overview of Scanning Implementation
+
+The first step is to use the OID to look up the correct Codec. The Map will call the Codec's PlanScan method to get a
+plan for scanning into the Go value. A Codec will support scanning into one or more Go types. Often these Go types
+are interfaces rather than explicit types. For example, PointCodec can use any Go type that implements the PointScanner
+and PointValuer interfaces.
+
+If a Go value is not supported directly by a Codec then Map will try to see if it is a sql.Scanner. If it is, then that
+interface will be used to scan the value. Most sql.Scanners require the input to be in the text format (e.g. UUIDs and
+numeric). However, pgx will typically have received the value in the binary format. In this case the binary value will be
+parsed, reencoded as text, and then passed to the sql.Scanner. This may incur additional overhead for query results with
+a large number of affected values.
+
+If a Go value is not supported directly by a Codec then Map will try wrapping it with additional logic and try again.
+For example, Int8Codec does not support scanning into a renamed type (e.g. type myInt64 int64). But Map will detect that
+myInt64 is a renamed type and create a plan that converts the value to the underlying int64 type and then passes that to
+the Codec (see TryFindUnderlyingTypeScanPlan).
+
+These plan wrappers are contained in Map.TryWrapScanPlanFuncs. By default these contain shared logic to handle renamed
+types, pointers to pointers, slices, composite types, etc. Additional plan wrappers can be added to seamlessly integrate
+types that do not support pgx directly. For example, the previously mentioned
+https://github.com/jackc/pgx-shopspring-decimal package detects decimal.Decimal values, wraps them in something
+implementing NumericScanner and passes that to the Codec.
+
+Map.Scan and Map.Encode are convenience methods that wrap Map.PlanScan and Map.PlanEncode. Determining how to scan or
+encode a particular type may be a time consuming operation. Hence the planning and execution steps of a conversion are
+internally separated.
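+
+A minimal sketch of reusing a plan directly (m, rawValues, and the OID/format choices are illustrative):
+
+ var n int32
+ plan := m.PlanScan(pgtype.Int4OID, pgtype.BinaryFormatCode, &n)
+ for _, src := range rawValues {
+  if err := plan.Scan(src, &n); err != nil {
+   return err
+  }
+  // use n
+ }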
+
+Reducing Compiled Binary Size
+
+pgx.QueryExecModeExec and pgx.QueryExecModeSimpleProtocol require the default PostgreSQL type to be registered for each
+Go type used as a query parameter. By default pgx does this for all supported types and their array variants. If an
+application does not use those query execution modes or manually registers the default PostgreSQL type for the types it
+uses as query parameters it can use the build tag nopgxregisterdefaulttypes. This omits the default type registration
+and reduces the compiled binary size by ~2MB.
+*/
+package pgtype
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go
new file mode 100644
index 0000000..5e787c1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go
@@ -0,0 +1,109 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// EnumCodec is a codec that caches the strings it decodes. If the same string is read multiple times only one copy is
+// allocated. These strings are only garbage collected when the EnumCodec is garbage collected. EnumCodec can be used
+// for any text type not only enums, but it should only be used when there are a small number of possible values.
+type EnumCodec struct {
+ membersMap map[string]string // map to quickly lookup member and reuse string instead of allocating
+}
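+
+// A minimal registration sketch from a caller's perspective; the enum's OID is illustrative
+// and would normally be discovered with conn.LoadType or a query against pg_type.
+//
+//	m.RegisterType(&pgtype.Type{Name: "mood", OID: moodOID, Codec: &pgtype.EnumCodec{}})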
+
+func (EnumCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (EnumCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (EnumCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodePlanTextCodecString{}
+ case []byte:
+ return encodePlanTextCodecByteSlice{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+func (c *EnumCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return &scanPlanTextAnyToEnumString{codec: c}
+ case *[]byte:
+ return scanPlanAnyToNewByteSlice{}
+ case TextScanner:
+ return &scanPlanTextAnyToEnumTextScanner{codec: c}
+ }
+ }
+
+ return nil
+}
+
+func (c *EnumCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c *EnumCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ return c.lookupAndCacheString(src), nil
+}
+
+// lookupAndCacheString looks for src in the members map. If it is not found it is added to the map.
+func (c *EnumCodec) lookupAndCacheString(src []byte) string {
+ if c.membersMap == nil {
+ c.membersMap = make(map[string]string)
+ }
+
+ if s, found := c.membersMap[string(src)]; found {
+ return s
+ }
+
+ s := string(src)
+ c.membersMap[s] = s
+ return s
+}
+
+type scanPlanTextAnyToEnumString struct {
+ codec *EnumCodec
+}
+
+func (plan *scanPlanTextAnyToEnumString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = plan.codec.lookupAndCacheString(src)
+
+ return nil
+}
+
+type scanPlanTextAnyToEnumTextScanner struct {
+ codec *EnumCodec
+}
+
+func (plan *scanPlanTextAnyToEnumTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: plan.codec.lookupAndCacheString(src), Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/float4.go b/vendor/github.com/jackc/pgx/v5/pgtype/float4.go
new file mode 100644
index 0000000..8646d9d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/float4.go
@@ -0,0 +1,319 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Float4 struct {
+ Float32 float32
+ Valid bool
+}
+
+// ScanFloat64 implements the Float64Scanner interface.
+func (f *Float4) ScanFloat64(n Float8) error {
+ *f = Float4{Float32: float32(n.Float64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float4) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(f.Float32), Valid: f.Valid}, nil
+}
+
+func (f *Float4) ScanInt64(n Int8) error {
+ *f = Float4{Float32: float32(n.Int64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float4) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(f.Float32), Valid: f.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (f *Float4) Scan(src any) error {
+ if src == nil {
+ *f = Float4{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *f = Float4{Float32: float32(src), Valid: true}
+ return nil
+ case string:
+ n, err := strconv.ParseFloat(string(src), 32)
+ if err != nil {
+ return err
+ }
+ *f = Float4{Float32: float32(n), Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (f Float4) Value() (driver.Value, error) {
+ if !f.Valid {
+ return nil, nil
+ }
+ return float64(f.Float32), nil
+}
+
+func (f Float4) MarshalJSON() ([]byte, error) {
+ if !f.Valid {
+ return []byte("null"), nil
+ }
+ return json.Marshal(f.Float32)
+}
+
+func (f *Float4) UnmarshalJSON(b []byte) error {
+ var n *float32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *f = Float4{}
+ } else {
+ *f = Float4{Float32: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Float4Codec struct{}
+
+func (Float4Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Float4Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Float4Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case float32:
+ return encodePlanFloat4CodecBinaryFloat32{}
+ case Float64Valuer:
+ return encodePlanFloat4CodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanFloat4CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case float32:
+ return encodePlanTextFloat32{}
+ case Float64Valuer:
+ return encodePlanTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanFloat4CodecBinaryFloat32 struct{}
+
+func (encodePlanFloat4CodecBinaryFloat32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float32)
+ return pgio.AppendUint32(buf, math.Float32bits(n)), nil
+}
+
+type encodePlanTextFloat32 struct{}
+
+func (encodePlanTextFloat32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float32)
+ return append(buf, strconv.FormatFloat(float64(n), 'f', -1, 32)...), nil
+}
+
+type encodePlanFloat4CodecBinaryFloat64Valuer struct{}
+
+func (encodePlanFloat4CodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint32(buf, math.Float32bits(float32(n.Float64))), nil
+}
+
+type encodePlanFloat4CodecBinaryInt64Valuer struct{}
+
+func (encodePlanFloat4CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ f := float32(n.Int64)
+ return pgio.AppendUint32(buf, math.Float32bits(f)), nil
+}
+
+func (Float4Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *float32:
+ return scanPlanBinaryFloat4ToFloat32{}
+ case Float64Scanner:
+ return scanPlanBinaryFloat4ToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryFloat4ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryFloat4ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *float32:
+ return scanPlanTextAnyToFloat32{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryFloat4ToFloat32 struct{}
+
+func (scanPlanBinaryFloat4ToFloat32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ f := (dst).(*float32)
+ *f = math.Float32frombits(uint32(n))
+
+ return nil
+}
+
+type scanPlanBinaryFloat4ToFloat64Scanner struct{}
+
+func (scanPlanBinaryFloat4ToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ return s.ScanFloat64(Float8{Float64: float64(math.Float32frombits(uint32(n))), Valid: true})
+}
+
+type scanPlanBinaryFloat4ToInt64Scanner struct{}
+
+func (scanPlanBinaryFloat4ToInt64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Int64Scanner)
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ ui32 := int32(binary.BigEndian.Uint32(src))
+ f32 := math.Float32frombits(uint32(ui32))
+ i64 := int64(f32)
+ if f32 != float32(i64) {
+ return fmt.Errorf("cannot losslessly convert %v to int64", f32)
+ }
+
+ return s.ScanInt64(Int8{Int64: i64, Valid: true})
+}
+
+type scanPlanBinaryFloat4ToTextScanner struct{}
+
+func (scanPlanBinaryFloat4ToTextScanner) Scan(src []byte, dst any) error {
+ s := (dst).(TextScanner)
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ ui32 := int32(binary.BigEndian.Uint32(src))
+ f32 := math.Float32frombits(uint32(ui32))
+
+ return s.ScanText(Text{String: strconv.FormatFloat(float64(f32), 'f', -1, 32), Valid: true})
+}
+
+type scanPlanTextAnyToFloat32 struct{}
+
+func (scanPlanTextAnyToFloat32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ n, err := strconv.ParseFloat(string(src), 32)
+ if err != nil {
+ return err
+ }
+
+ f := (dst).(*float32)
+ *f = float32(n)
+
+ return nil
+}
+
+func (c Float4Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return float64(n), nil
+}
+
+func (c Float4Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/float8.go b/vendor/github.com/jackc/pgx/v5/pgtype/float8.go
new file mode 100644
index 0000000..9c923c9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/float8.go
@@ -0,0 +1,365 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Float64Scanner interface {
+ ScanFloat64(Float8) error
+}
+
+type Float64Valuer interface {
+ Float64Value() (Float8, error)
+}
+
+type Float8 struct {
+ Float64 float64
+ Valid bool
+}
+
+// ScanFloat64 implements the Float64Scanner interface.
+func (f *Float8) ScanFloat64(n Float8) error {
+ *f = n
+ return nil
+}
+
+func (f Float8) Float64Value() (Float8, error) {
+ return f, nil
+}
+
+func (f *Float8) ScanInt64(n Int8) error {
+ *f = Float8{Float64: float64(n.Int64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float8) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(f.Float64), Valid: f.Valid}, nil
+}
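+
+// A minimal usage sketch; conn and ctx are illustrative. Float8 works both as a nullable
+// scan target and as a query parameter.
+//
+//	var f pgtype.Float8
+//	err := conn.QueryRow(ctx, "select 1.5::float8").Scan(&f)
+//	// f.Valid reports whether the value was non-NULL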
+
+// Scan implements the database/sql Scanner interface.
+func (f *Float8) Scan(src any) error {
+ if src == nil {
+ *f = Float8{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *f = Float8{Float64: src, Valid: true}
+ return nil
+ case string:
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+ *f = Float8{Float64: n, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (f Float8) Value() (driver.Value, error) {
+ if !f.Valid {
+ return nil, nil
+ }
+ return f.Float64, nil
+}
+
+func (f Float8) MarshalJSON() ([]byte, error) {
+ if !f.Valid {
+ return []byte("null"), nil
+ }
+ return json.Marshal(f.Float64)
+}
+
+func (f *Float8) UnmarshalJSON(b []byte) error {
+ var n *float64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *f = Float8{}
+ } else {
+ *f = Float8{Float64: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Float8Codec struct{}
+
+func (Float8Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Float8Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Float8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case float64:
+ return encodePlanFloat8CodecBinaryFloat64{}
+ case Float64Valuer:
+ return encodePlanFloat8CodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanFloat8CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case float64:
+ return encodePlanTextFloat64{}
+ case Float64Valuer:
+ return encodePlanTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanFloat8CodecBinaryFloat64 struct{}
+
+func (encodePlanFloat8CodecBinaryFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float64)
+ return pgio.AppendUint64(buf, math.Float64bits(n)), nil
+}
+
+type encodePlanTextFloat64 struct{}
+
+func (encodePlanTextFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float64)
+ return append(buf, strconv.FormatFloat(n, 'f', -1, 64)...), nil
+}
+
+type encodePlanFloat8CodecBinaryFloat64Valuer struct{}
+
+func (encodePlanFloat8CodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint64(buf, math.Float64bits(n.Float64)), nil
+}
+
+type encodePlanTextFloat64Valuer struct{}
+
+func (encodePlanTextFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatFloat(n.Float64, 'f', -1, 64)...), nil
+}
+
+type encodePlanFloat8CodecBinaryInt64Valuer struct{}
+
+func (encodePlanFloat8CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ f := float64(n.Int64)
+ return pgio.AppendUint64(buf, math.Float64bits(f)), nil
+}
+
+type encodePlanTextInt64Valuer struct{}
+
+func (encodePlanTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Float8Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *float64:
+ return scanPlanBinaryFloat8ToFloat64{}
+ case Float64Scanner:
+ return scanPlanBinaryFloat8ToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryFloat8ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryFloat8ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *float64:
+ return scanPlanTextAnyToFloat64{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryFloat8ToFloat64 struct{}
+
+func (scanPlanBinaryFloat8ToFloat64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ f := (dst).(*float64)
+ *f = math.Float64frombits(uint64(n))
+
+ return nil
+}
+
+type scanPlanBinaryFloat8ToFloat64Scanner struct{}
+
+func (scanPlanBinaryFloat8ToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ return s.ScanFloat64(Float8{Float64: math.Float64frombits(uint64(n)), Valid: true})
+}
+
+type scanPlanBinaryFloat8ToInt64Scanner struct{}
+
+func (scanPlanBinaryFloat8ToInt64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Int64Scanner)
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ ui64 := int64(binary.BigEndian.Uint64(src))
+ f64 := math.Float64frombits(uint64(ui64))
+ i64 := int64(f64)
+ if f64 != float64(i64) {
+ return fmt.Errorf("cannot losslessly convert %v to int64", f64)
+ }
+
+ return s.ScanInt64(Int8{Int64: i64, Valid: true})
+}
+
+type scanPlanBinaryFloat8ToTextScanner struct{}
+
+func (scanPlanBinaryFloat8ToTextScanner) Scan(src []byte, dst any) error {
+ s := (dst).(TextScanner)
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ ui64 := int64(binary.BigEndian.Uint64(src))
+ f64 := math.Float64frombits(uint64(ui64))
+
+ return s.ScanText(Text{String: strconv.FormatFloat(f64, 'f', -1, 64), Valid: true})
+}
+
+type scanPlanTextAnyToFloat64 struct{}
+
+func (scanPlanTextAnyToFloat64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ f := (dst).(*float64)
+ *f = n
+
+ return nil
+}
+
+type scanPlanTextAnyToFloat64Scanner struct{}
+
+func (scanPlanTextAnyToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanFloat64(Float8{Float64: n, Valid: true})
+}
+
+func (c Float8Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c Float8Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go b/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go
new file mode 100644
index 0000000..2f34f4c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go
@@ -0,0 +1,486 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type HstoreScanner interface {
+ ScanHstore(v Hstore) error
+}
+
+type HstoreValuer interface {
+ HstoreValue() (Hstore, error)
+}
+
+// Hstore represents an hstore column that can be null or have null values
+// associated with its keys.
+type Hstore map[string]*string
+
+func (h *Hstore) ScanHstore(v Hstore) error {
+ *h = v
+ return nil
+}
+
+func (h Hstore) HstoreValue() (Hstore, error) {
+ return h, nil
+}
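+
+// A minimal usage sketch; because values are *string, individual keys can map to NULL.
+//
+//	bar := "bar"
+//	h := pgtype.Hstore{"foo": &bar, "baz": nil}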
+
+// Scan implements the database/sql Scanner interface.
+func (h *Hstore) Scan(src any) error {
+ if src == nil {
+ *h = nil
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToHstoreScanner{}.scanString(src, h)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (h Hstore) Value() (driver.Value, error) {
+ if h == nil {
+ return nil, nil
+ }
+
+ buf, err := HstoreCodec{}.PlanEncode(nil, 0, TextFormatCode, h).Encode(h, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type HstoreCodec struct{}
+
+func (HstoreCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (HstoreCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (HstoreCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(HstoreValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanHstoreCodecBinary{}
+ case TextFormatCode:
+ return encodePlanHstoreCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanHstoreCodecBinary struct{}
+
+func (encodePlanHstoreCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ hstore, err := value.(HstoreValuer).HstoreValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if hstore == nil {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(hstore)))
+
+ for k, v := range hstore {
+ buf = pgio.AppendInt32(buf, int32(len(k)))
+ buf = append(buf, k...)
+
+ if v == nil {
+ buf = pgio.AppendInt32(buf, -1)
+ } else {
+ buf = pgio.AppendInt32(buf, int32(len(*v)))
+ buf = append(buf, (*v)...)
+ }
+ }
+
+ return buf, nil
+}
+
+type encodePlanHstoreCodecText struct{}
+
+func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ hstore, err := value.(HstoreValuer).HstoreValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(hstore) == 0 {
+ // distinguish between empty and nil: Not strictly required by Postgres, since its protocol
+ // explicitly marks NULL column values separately. However, the Binary codec does this, and
+ // this means we can "round trip" Encode and Scan without data loss.
+ // nil: []byte(nil); empty: []byte{}
+ if hstore == nil {
+ return nil, nil
+ }
+ return []byte{}, nil
+ }
+
+ firstPair := true
+
+ for k, v := range hstore {
+ if firstPair {
+ firstPair = false
+ } else {
+ buf = append(buf, ',', ' ')
+ }
+
+ // unconditionally quote hstore keys/values like Postgres does
+ // this avoids a Mac OS X Postgres hstore parsing bug:
+ // https://www.postgresql.org/message-id/CA%2BHWA9awUW0%2BRV_gO9r1ABZwGoZxPztcJxPy8vMFSTbTfi4jig%40mail.gmail.com
+ buf = append(buf, '"')
+ buf = append(buf, quoteArrayReplacer.Replace(k)...)
+ buf = append(buf, '"')
+ buf = append(buf, "=>"...)
+
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, '"')
+ buf = append(buf, quoteArrayReplacer.Replace(*v)...)
+ buf = append(buf, '"')
+ }
+ }
+
+ return buf, nil
+}
+
+func (HstoreCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case HstoreScanner:
+ return scanPlanBinaryHstoreToHstoreScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case HstoreScanner:
+ return scanPlanTextAnyToHstoreScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryHstoreToHstoreScanner struct{}
+
+func (scanPlanBinaryHstoreToHstoreScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(HstoreScanner)
+
+ if src == nil {
+ return scanner.ScanHstore(Hstore(nil))
+ }
+
+ rp := 0
+
+ const uint32Len = 4
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ pairCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += uint32Len
+
+ hstore := make(Hstore, pairCount)
+ // one allocation for all *string, rather than one per string, just like text parsing
+ valueStrings := make([]string, pairCount)
+
+ for i := 0; i < pairCount; i++ {
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ keyLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += uint32Len
+
+ if len(src[rp:]) < keyLen {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ key := string(src[rp : rp+keyLen])
+ rp += keyLen
+
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ if valueLen >= 0 {
+ valueStrings[i] = string(src[rp : rp+valueLen])
+ rp += valueLen
+
+ hstore[key] = &valueStrings[i]
+ } else {
+ hstore[key] = nil
+ }
+ }
+
+ return scanner.ScanHstore(hstore)
+}
+
+type scanPlanTextAnyToHstoreScanner struct{}
+
+func (s scanPlanTextAnyToHstoreScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(HstoreScanner)
+
+ if src == nil {
+ return scanner.ScanHstore(Hstore(nil))
+ }
+ return s.scanString(string(src), scanner)
+}
+
+// scanString does not return nil hstore values because string cannot be nil.
+func (scanPlanTextAnyToHstoreScanner) scanString(src string, scanner HstoreScanner) error {
+ hstore, err := parseHstore(src)
+ if err != nil {
+ return err
+ }
+ return scanner.ScanHstore(hstore)
+}
+
+func (c HstoreCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c HstoreCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var hstore Hstore
+ err := codecScan(c, m, oid, format, src, &hstore)
+ if err != nil {
+ return nil, err
+ }
+ return hstore, nil
+}
+
+type hstoreParser struct {
+ str string
+ pos int
+ nextBackslash int
+}
+
+func newHSP(in string) *hstoreParser {
+ return &hstoreParser{
+ pos: 0,
+ str: in,
+ nextBackslash: strings.IndexByte(in, '\\'),
+ }
+}
+
+func (p *hstoreParser) atEnd() bool {
+ return p.pos >= len(p.str)
+}
+
+// consume returns the next byte of the string, or end if the string is done.
+func (p *hstoreParser) consume() (b byte, end bool) {
+ if p.pos >= len(p.str) {
+ return 0, true
+ }
+ b = p.str[p.pos]
+ p.pos++
+ return b, false
+}
+
+func unexpectedByteErr(actualB byte, expectedB byte) error {
+ return fmt.Errorf("expected '%c' ('%#v'); found '%c' ('%#v')", expectedB, expectedB, actualB, actualB)
+}
+
+// consumeExpectedByte consumes expectedB from the string, or returns an error.
+func (p *hstoreParser) consumeExpectedByte(expectedB byte) error {
+ nextB, end := p.consume()
+ if end {
+ return fmt.Errorf("expected '%c' ('%#v'); found end", expectedB, expectedB)
+ }
+ if nextB != expectedB {
+ return unexpectedByteErr(nextB, expectedB)
+ }
+ return nil
+}
+
+// consumeExpected2 consumes two expected bytes or returns an error.
+// This was a bit faster than using a string argument (better inlining? Not sure).
+func (p *hstoreParser) consumeExpected2(one byte, two byte) error {
+ if p.pos+2 > len(p.str) {
+ return errors.New("unexpected end of string")
+ }
+ if p.str[p.pos] != one {
+ return unexpectedByteErr(p.str[p.pos], one)
+ }
+ if p.str[p.pos+1] != two {
+ return unexpectedByteErr(p.str[p.pos+1], two)
+ }
+ p.pos += 2
+ return nil
+}
+
+var errEOSInQuoted = errors.New(`found end before closing double-quote ('"')`)
+
+// consumeDoubleQuoted consumes a double-quoted string from p. The double quote must have been
+// parsed already. This copies the string from the backing string so it can be garbage collected.
+func (p *hstoreParser) consumeDoubleQuoted() (string, error) {
+ // fast path: assume most keys/values do not contain escapes
+ nextDoubleQuote := strings.IndexByte(p.str[p.pos:], '"')
+ if nextDoubleQuote == -1 {
+ return "", errEOSInQuoted
+ }
+ nextDoubleQuote += p.pos
+ if p.nextBackslash == -1 || p.nextBackslash > nextDoubleQuote {
+ // clone the string from the source string to ensure it can be garbage collected separately
+ // TODO: use strings.Clone on Go 1.20; this could get optimized away
+ s := strings.Clone(p.str[p.pos:nextDoubleQuote])
+ p.pos = nextDoubleQuote + 1
+ return s, nil
+ }
+
+ // slow path: string contains escapes
+ s, err := p.consumeDoubleQuotedWithEscapes(p.nextBackslash)
+ p.nextBackslash = strings.IndexByte(p.str[p.pos:], '\\')
+ if p.nextBackslash != -1 {
+ p.nextBackslash += p.pos
+ }
+ return s, err
+}
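+
+// Illustrative note (editorial addition, not part of the upstream source):
+// with remaining input `hello", ...` the fast path above returns "hello"
+// without touching the escape machinery, while input containing a backslash,
+// such as `he\"llo"`, takes the slow path and is unescaped to `he"llo`.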
+
+// consumeDoubleQuotedWithEscapes consumes a double-quoted string containing escapes, starting
+// at p.pos, and with the first backslash at firstBackslash. This copies the string so it can be
+// garbage collected separately.
+func (p *hstoreParser) consumeDoubleQuotedWithEscapes(firstBackslash int) (string, error) {
+ // copy the prefix that does not contain backslashes
+ var builder strings.Builder
+ builder.WriteString(p.str[p.pos:firstBackslash])
+
+ // skip to the backslash
+ p.pos = firstBackslash
+
+ // copy bytes until the end, unescaping backslashes
+ for {
+ nextB, end := p.consume()
+ if end {
+ return "", errEOSInQuoted
+ } else if nextB == '"' {
+ break
+ } else if nextB == '\\' {
+ // escape: skip the backslash and copy the char
+ nextB, end = p.consume()
+ if end {
+ return "", errEOSInQuoted
+ }
+ if !(nextB == '\\' || nextB == '"') {
+ return "", fmt.Errorf("unexpected escape in quoted string: found '%#v'", nextB)
+ }
+ builder.WriteByte(nextB)
+ } else {
+ // normal byte: copy it
+ builder.WriteByte(nextB)
+ }
+ }
+ return builder.String(), nil
+}
+
+// consumePairSeparator consumes the Hstore pair separator ", " or returns an error.
+func (p *hstoreParser) consumePairSeparator() error {
+ return p.consumeExpected2(',', ' ')
+}
+
+// consumeKVSeparator consumes the Hstore key/value separator "=>" or returns an error.
+func (p *hstoreParser) consumeKVSeparator() error {
+ return p.consumeExpected2('=', '>')
+}
+
+// consumeDoubleQuotedOrNull consumes a double-quoted value or the unquoted literal NULL, or returns an error.
+func (p *hstoreParser) consumeDoubleQuotedOrNull() (Text, error) {
+ // peek at the next byte
+ if p.atEnd() {
+ return Text{}, errors.New("found end instead of value")
+ }
+ next := p.str[p.pos]
+ if next == 'N' {
+ // must be the exact string NULL: use consumeExpected2 twice
+ err := p.consumeExpected2('N', 'U')
+ if err != nil {
+ return Text{}, err
+ }
+ err = p.consumeExpected2('L', 'L')
+ if err != nil {
+ return Text{}, err
+ }
+ return Text{String: "", Valid: false}, nil
+ } else if next != '"' {
+ return Text{}, unexpectedByteErr(next, '"')
+ }
+
+ // skip the double quote
+ p.pos += 1
+ s, err := p.consumeDoubleQuoted()
+ if err != nil {
+ return Text{}, err
+ }
+ return Text{String: s, Valid: true}, nil
+}
+
+func parseHstore(s string) (Hstore, error) {
+ p := newHSP(s)
+
+ // This is an over-estimate of the number of key/value pairs. Use '>' because I am guessing it
+ // is less likely to occur in keys/values than '=' or ','.
+ numPairsEstimate := strings.Count(s, ">")
+ // makes one allocation of strings for the entire Hstore, rather than one allocation per value.
+ valueStrings := make([]string, 0, numPairsEstimate)
+ result := make(Hstore, numPairsEstimate)
+ first := true
+ for !p.atEnd() {
+ if !first {
+ err := p.consumePairSeparator()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ first = false
+ }
+
+ err := p.consumeExpectedByte('"')
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := p.consumeDoubleQuoted()
+ if err != nil {
+ return nil, err
+ }
+
+ err = p.consumeKVSeparator()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := p.consumeDoubleQuotedOrNull()
+ if err != nil {
+ return nil, err
+ }
+ if value.Valid {
+ valueStrings = append(valueStrings, value.String)
+ result[key] = &valueStrings[len(valueStrings)-1]
+ } else {
+ result[key] = nil
+ }
+ }
+
+ return result, nil
+}
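+
+// Illustrative usage sketch (editorial addition, not part of the upstream
+// source). parseHstore yields nil pointers for NULL values and string
+// pointers otherwise:
+//
+//	h, err := parseHstore(`"a"=>"1", "b"=>NULL`)
+//	// err == nil; *h["a"] == "1"; h["b"] == nil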
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/inet.go b/vendor/github.com/jackc/pgx/v5/pgtype/inet.go
new file mode 100644
index 0000000..6ca10ea
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/inet.go
@@ -0,0 +1,200 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "net/netip"
+)
+
+// Network address family is dependent on server socket.h value for AF_INET.
+// In practice, all platforms appear to have the same value. See
+// src/include/utils/inet.h for more information.
+const (
+ defaultAFInet = 2
+ defaultAFInet6 = 3
+)
+
+type NetipPrefixScanner interface {
+ ScanNetipPrefix(v netip.Prefix) error
+}
+
+type NetipPrefixValuer interface {
+ NetipPrefixValue() (netip.Prefix, error)
+}
+
+// InetCodec handles both inet and cidr PostgreSQL types. The preferred Go types are netip.Prefix and netip.Addr. If
+// IsValid() is false then they are treated as SQL NULL.
+type InetCodec struct{}
+
+func (InetCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (InetCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (InetCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(NetipPrefixValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanInetCodecBinary{}
+ case TextFormatCode:
+ return encodePlanInetCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanInetCodecBinary struct{}
+
+func (encodePlanInetCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ prefix, err := value.(NetipPrefixValuer).NetipPrefixValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ var family byte
+ if prefix.Addr().Is4() {
+ family = defaultAFInet
+ } else {
+ family = defaultAFInet6
+ }
+
+ buf = append(buf, family)
+
+ ones := prefix.Bits()
+ buf = append(buf, byte(ones))
+
+ // is_cidr is ignored on server
+ buf = append(buf, 0)
+
+ if family == defaultAFInet {
+ buf = append(buf, byte(4))
+ b := prefix.Addr().As4()
+ buf = append(buf, b[:]...)
+ } else {
+ buf = append(buf, byte(16))
+ b := prefix.Addr().As16()
+ buf = append(buf, b[:]...)
+ }
+
+ return buf, nil
+}
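+
+// Editorial note (not part of the upstream source): the binary wire format
+// written above is, in order, one family byte (AF_INET or AF_INET6), one byte
+// holding the prefix length in bits, one is_cidr byte (always 0 here; the
+// server ignores it), one byte holding the address length (4 or 16), and
+// finally the raw address bytes.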
+
+type encodePlanInetCodecText struct{}
+
+func (encodePlanInetCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ prefix, err := value.(NetipPrefixValuer).NetipPrefixValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ return append(buf, prefix.String()...), nil
+}
+
+func (InetCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case NetipPrefixScanner:
+ return scanPlanBinaryInetToNetipPrefixScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case NetipPrefixScanner:
+ return scanPlanTextAnyToNetipPrefixScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c InetCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c InetCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var prefix netip.Prefix
+ err := codecScan(c, m, oid, format, src, (*netipPrefixWrapper)(&prefix))
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ return prefix, nil
+}
+
+type scanPlanBinaryInetToNetipPrefixScanner struct{}
+
+func (scanPlanBinaryInetToNetipPrefixScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NetipPrefixScanner)
+
+ if src == nil {
+ return scanner.ScanNetipPrefix(netip.Prefix{})
+ }
+
+ if len(src) != 8 && len(src) != 20 {
+ return fmt.Errorf("Received an invalid size for an inet: %d", len(src))
+ }
+
+ // ignore family
+ bits := src[1]
+ // ignore is_cidr
+ // ignore addressLength - implicit in length of message
+
+ addr, ok := netip.AddrFromSlice(src[4:])
+ if !ok {
+ return errors.New("netip.AddrFromSlice failed")
+ }
+
+ return scanner.ScanNetipPrefix(netip.PrefixFrom(addr, int(bits)))
+}
+
+type scanPlanTextAnyToNetipPrefixScanner struct{}
+
+func (scanPlanTextAnyToNetipPrefixScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NetipPrefixScanner)
+
+ if src == nil {
+ return scanner.ScanNetipPrefix(netip.Prefix{})
+ }
+
+ var prefix netip.Prefix
+ if bytes.IndexByte(src, '/') == -1 {
+ addr, err := netip.ParseAddr(string(src))
+ if err != nil {
+ return err
+ }
+ prefix = netip.PrefixFrom(addr, addr.BitLen())
+ } else {
+ var err error
+ prefix, err = netip.ParsePrefix(string(src))
+ if err != nil {
+ return err
+ }
+ }
+
+ return scanner.ScanNetipPrefix(prefix)
+}
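+
+// Illustrative behaviour sketch (editorial addition, not part of the upstream
+// source): text input without a '/' is widened to a full-length prefix, so
+// "127.0.0.1" scans as the prefix 127.0.0.1/32, while "10.0.0.0/8" is parsed
+// with netip.ParsePrefix and kept as-is.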
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int.go b/vendor/github.com/jackc/pgx/v5/pgtype/int.go
new file mode 100644
index 0000000..7a2f8cb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int.go
@@ -0,0 +1,1981 @@
+// Code generated from pgtype/int.go.erb. DO NOT EDIT.
+
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Int64Scanner interface {
+ ScanInt64(Int8) error
+}
+
+type Int64Valuer interface {
+ Int64Value() (Int8, error)
+}
+
+type Int2 struct {
+ Int16 int16
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int2) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int2{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for Int2", n.Int64)
+ }
+ if n.Int64 > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n.Int64)
+ }
+ *dst = Int2{Int16: int16(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int2) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int16), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int2) Scan(src any) error {
+ if src == nil {
+ *dst = Int2{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 16)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n)
+ }
+ if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n)
+ }
+ *dst = Int2{Int16: int16(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int2) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int16), nil
+}
+
+func (src Int2) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int16), 10)), nil
+}
+
+func (dst *Int2) UnmarshalJSON(b []byte) error {
+ var n *int16
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int2{}
+ } else {
+ *dst = Int2{Int16: *n, Valid: true}
+ }
+
+ return nil
+}
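+
+// Illustrative JSON round-trip sketch (editorial addition, not part of the
+// upstream source): an invalid Int2 marshals to JSON null, and unmarshalling
+// null yields an invalid Int2 again:
+//
+//	b, _ := json.Marshal(Int2{})           // b is []byte("null")
+//	var v Int2
+//	_ = json.Unmarshal([]byte("123"), &v)  // v == Int2{Int16: 123, Valid: true}
+//	_ = json.Unmarshal([]byte("null"), &v) // v == Int2{} (SQL NULL)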
+
+type Int2Codec struct{}
+
+func (Int2Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int2Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int2Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int16:
+ return encodePlanInt2CodecBinaryInt16{}
+ case Int64Valuer:
+ return encodePlanInt2CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int16:
+ return encodePlanInt2CodecTextInt16{}
+ case Int64Valuer:
+ return encodePlanInt2CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt2CodecBinaryInt16 struct{}
+
+func (encodePlanInt2CodecBinaryInt16) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int16)
+ return pgio.AppendInt16(buf, int16(n)), nil
+}
+
+type encodePlanInt2CodecTextInt16 struct{}
+
+func (encodePlanInt2CodecTextInt16) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int16)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt2CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt2CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt16 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int2", n.Int64)
+ }
+ if n.Int64 < math.MinInt16 {
+ return nil, fmt.Errorf("%d is less than minimum value for int2", n.Int64)
+ }
+
+ return pgio.AppendInt16(buf, int16(n.Int64)), nil
+}
+
+type encodePlanInt2CodecTextInt64Valuer struct{}
+
+func (encodePlanInt2CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt16 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int2", n.Int64)
+ }
+ if n.Int64 < math.MinInt16 {
+ return nil, fmt.Errorf("%d is less than minimum value for int2", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int2Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt2ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt2ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt2ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt2ToInt64{}
+ case *int:
+ return scanPlanBinaryInt2ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt2ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt2ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt2ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt2ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt2ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt2ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt2ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int2Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int2Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int16
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryInt2ToInt8 struct{}
+
+func (scanPlanBinaryInt2ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint8 struct{}
+
+func (scanPlanBinaryInt2ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt16 struct{}
+
+func (scanPlanBinaryInt2ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int16(binary.BigEndian.Uint16(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint16 struct{}
+
+func (scanPlanBinaryInt2ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt32 struct{}
+
+func (scanPlanBinaryInt2ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int32(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint32 struct{}
+
+func (scanPlanBinaryInt2ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt64 struct{}
+
+func (scanPlanBinaryInt2ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint64 struct{}
+
+func (scanPlanBinaryInt2ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt struct{}
+
+func (scanPlanBinaryInt2ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint struct{}
+
+func (scanPlanBinaryInt2ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt2ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt2ToTextScanner struct{}
+
+func (scanPlanBinaryInt2ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type Int4 struct {
+ Int32 int32
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int4) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int4{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for Int4", n.Int64)
+ }
+ if n.Int64 > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n.Int64)
+ }
+ *dst = Int4{Int32: int32(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int4) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int32), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4) Scan(src any) error {
+ if src == nil {
+ *dst = Int4{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 32)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n)
+ }
+ if n > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n)
+ }
+ *dst = Int4{Int32: int32(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int4) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int32), nil
+}
+
+func (src Int4) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int32), 10)), nil
+}
+
+func (dst *Int4) UnmarshalJSON(b []byte) error {
+ var n *int32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int4{}
+ } else {
+ *dst = Int4{Int32: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int4Codec struct{}
+
+func (Int4Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int4Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int4Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int32:
+ return encodePlanInt4CodecBinaryInt32{}
+ case Int64Valuer:
+ return encodePlanInt4CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int32:
+ return encodePlanInt4CodecTextInt32{}
+ case Int64Valuer:
+ return encodePlanInt4CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt4CodecBinaryInt32 struct{}
+
+func (encodePlanInt4CodecBinaryInt32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int32)
+ return pgio.AppendInt32(buf, int32(n)), nil
+}
+
+type encodePlanInt4CodecTextInt32 struct{}
+
+func (encodePlanInt4CodecTextInt32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int32)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt4CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt4CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int4", n.Int64)
+ }
+ if n.Int64 < math.MinInt32 {
+ return nil, fmt.Errorf("%d is less than minimum value for int4", n.Int64)
+ }
+
+ return pgio.AppendInt32(buf, int32(n.Int64)), nil
+}
+
+type encodePlanInt4CodecTextInt64Valuer struct{}
+
+func (encodePlanInt4CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int4", n.Int64)
+ }
+ if n.Int64 < math.MinInt32 {
+ return nil, fmt.Errorf("%d is less than minimum value for int4", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int4Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt4ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt4ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt4ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt4ToInt64{}
+ case *int:
+ return scanPlanBinaryInt4ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt4ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt4ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt4ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt4ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt4ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt4ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt4ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int4Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int4Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryInt4ToInt8 struct{}
+
+func (scanPlanBinaryInt4ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint8 struct{}
+
+func (scanPlanBinaryInt4ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt16 struct{}
+
+func (scanPlanBinaryInt4ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", n)
+ } else if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", n)
+ }
+
+ *p = int16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint16 struct{}
+
+func (scanPlanBinaryInt4ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ if n > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt32 struct{}
+
+func (scanPlanBinaryInt4ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int32(binary.BigEndian.Uint32(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint32 struct{}
+
+func (scanPlanBinaryInt4ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt64 struct{}
+
+func (scanPlanBinaryInt4ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(int32(binary.BigEndian.Uint32(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint64 struct{}
+
+func (scanPlanBinaryInt4ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt struct{}
+
+func (scanPlanBinaryInt4ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int(int32(binary.BigEndian.Uint32(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint struct{}
+
+func (scanPlanBinaryInt4ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt4ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt4ToTextScanner struct{}
+
+func (scanPlanBinaryInt4ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type Int8 struct {
+ Int64 int64
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int8) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int8{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt64 {
+ return fmt.Errorf("%d is less than minimum value for Int8", n.Int64)
+ }
+ if n.Int64 > math.MaxInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n.Int64)
+ }
+ *dst = Int8{Int64: int64(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int8) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int64), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8) Scan(src any) error {
+ if src == nil {
+ *dst = Int8{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 64)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n)
+ }
+ if n > math.MaxInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n)
+ }
+ *dst = Int8{Int64: int64(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int8) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int64), nil
+}
+
+func (src Int8) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int64), 10)), nil
+}
+
+func (dst *Int8) UnmarshalJSON(b []byte) error {
+ var n *int64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int8{}
+ } else {
+ *dst = Int8{Int64: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int8Codec struct{}
+
+func (Int8Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int8Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int64:
+ return encodePlanInt8CodecBinaryInt64{}
+ case Int64Valuer:
+ return encodePlanInt8CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int64:
+ return encodePlanInt8CodecTextInt64{}
+ case Int64Valuer:
+ return encodePlanInt8CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt8CodecBinaryInt64 struct{}
+
+func (encodePlanInt8CodecBinaryInt64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int64)
+ return pgio.AppendInt64(buf, int64(n)), nil
+}
+
+type encodePlanInt8CodecTextInt64 struct{}
+
+func (encodePlanInt8CodecTextInt64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int64)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt8CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt8CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt64 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
+ }
+ if n.Int64 < math.MinInt64 {
+ return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
+ }
+
+ return pgio.AppendInt64(buf, int64(n.Int64)), nil
+}
+
+type encodePlanInt8CodecTextInt64Valuer struct{}
+
+func (encodePlanInt8CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt64 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
+ }
+ if n.Int64 < math.MinInt64 {
+ return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int8Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt8ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt8ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt8ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt8ToInt64{}
+ case *int:
+ return scanPlanBinaryInt8ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt8ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt8ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt8ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt8ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt8ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt8ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt8ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int8Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int8Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryInt8ToInt8 struct{}
+
+func (scanPlanBinaryInt8ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint8 struct{}
+
+func (scanPlanBinaryInt8ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt16 struct{}
+
+func (scanPlanBinaryInt8ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", n)
+ } else if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", n)
+ }
+
+ *p = int16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint16 struct{}
+
+func (scanPlanBinaryInt8ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ if n > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt32 struct{}
+
+func (scanPlanBinaryInt8ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for int32", n)
+ } else if n > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for int32", n)
+ }
+
+ *p = int32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint32 struct{}
+
+func (scanPlanBinaryInt8ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ if n > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than maximum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt64 struct{}
+
+func (scanPlanBinaryInt8ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(binary.BigEndian.Uint64(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint64 struct{}
+
+func (scanPlanBinaryInt8ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt struct{}
+
+func (scanPlanBinaryInt8ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", n)
+ } else if n > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", n)
+ }
+
+ *p = int(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint struct{}
+
+func (scanPlanBinaryInt8ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int64(binary.BigEndian.Uint64(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ if uint64(n) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt8ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(int64(binary.BigEndian.Uint64(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt8ToTextScanner struct{}
+
+func (scanPlanBinaryInt8ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(int64(binary.BigEndian.Uint64(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type scanPlanTextAnyToInt8 struct{}
+
+func (scanPlanTextAnyToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 8)
+ if err != nil {
+ return err
+ }
+
+ *p = int8(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint8 struct{}
+
+func (scanPlanTextAnyToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 8)
+ if err != nil {
+ return err
+ }
+
+ *p = uint8(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt16 struct{}
+
+func (scanPlanTextAnyToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *p = int16(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint16 struct{}
+
+func (scanPlanTextAnyToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *p = uint16(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt32 struct{}
+
+func (scanPlanTextAnyToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *p = int32(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint32 struct{}
+
+func (scanPlanTextAnyToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *p = uint32(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt64 struct{}
+
+func (scanPlanTextAnyToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *p = int64(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint64 struct{}
+
+func (scanPlanTextAnyToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *p = uint64(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt struct{}
+
+func (scanPlanTextAnyToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ *p = int(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint struct{}
+
+func (scanPlanTextAnyToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ *p = uint(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt64Scanner struct{}
+
+func (scanPlanTextAnyToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ err = s.ScanInt64(Int8{Int64: n, Valid: true})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb
new file mode 100644
index 0000000..e0c8b7a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb
@@ -0,0 +1,548 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Int64Scanner interface {
+ ScanInt64(Int8) error
+}
+
+type Int64Valuer interface {
+ Int64Value() (Int8, error)
+}
+
+
+<% [2, 4, 8].each do |pg_byte_size| %>
+<% pg_bit_size = pg_byte_size * 8 %>
+type Int<%= pg_byte_size %> struct {
+ Int<%= pg_bit_size %> int<%= pg_bit_size %>
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int<%= pg_byte_size %>) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int<%= pg_byte_size %>{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is less than minimum value for Int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n.Int64)
+ }
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: int<%= pg_bit_size %>(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int<%= pg_byte_size %>) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int<%= pg_bit_size %>), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int<%= pg_byte_size %>) Scan(src any) error {
+ if src == nil {
+ *dst = Int<%= pg_byte_size %>{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, <%= pg_bit_size %>)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, <%= pg_bit_size %>)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n)
+ }
+ if n > math.MaxInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n)
+ }
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: int<%= pg_bit_size %>(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int<%= pg_byte_size %>) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int<%= pg_bit_size %>), nil
+}
+
+func (src Int<%= pg_byte_size %>) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int<%= pg_bit_size %>), 10)), nil
+}
+
+func (dst *Int<%= pg_byte_size %>) UnmarshalJSON(b []byte) error {
+ var n *int<%= pg_bit_size %>
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int<%= pg_byte_size %>{}
+ } else {
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int<%= pg_byte_size %>Codec struct{}
+
+func (Int<%= pg_byte_size %>Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int<%= pg_byte_size %>Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int<%= pg_byte_size %>Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int<%= pg_bit_size %>:
+ return encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %>{}
+ case Int64Valuer:
+ return encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int<%= pg_bit_size %>:
+ return encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %>{}
+ case Int64Valuer:
+ return encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %> struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %>) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int<%= pg_bit_size %>)
+ return pgio.AppendInt<%= pg_bit_size %>(buf, int<%= pg_bit_size %>(n)), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %> struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %>) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int<%= pg_bit_size %>)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is greater than maximum value for int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is less than minimum value for int<%= pg_byte_size %>", n.Int64)
+ }
+
+ return pgio.AppendInt<%= pg_bit_size %>(buf, int<%= pg_bit_size %>(n.Int64)), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is greater than maximum value for int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is less than minimum value for int<%= pg_byte_size %>", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int<%= pg_byte_size %>Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt64{}
+ case *int:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int<%= pg_byte_size %>Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int<%= pg_byte_size %>Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int<%= pg_bit_size %>
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+<%# PostgreSQL binary format integer to fixed size Go integers %>
+<% [8, 16, 32, 64].each do |dst_bit_size| %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt<%= dst_bit_size %> struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt<%= dst_bit_size %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*int<%= dst_bit_size %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
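+	// Narrowing conversions (Go destination narrower than the wire value) are
+	// range-checked before assignment; same-width and widening conversions can
+	// be assigned directly.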
+ <% if dst_bit_size < pg_bit_size %>
+ n := int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < math.MinInt<%= dst_bit_size %> {
+ return fmt.Errorf("%d is less than minimum value for int<%= dst_bit_size %>", n)
+ } else if n > math.MaxInt<%= dst_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for int<%= dst_bit_size %>", n)
+ }
+
+ *p = int<%= dst_bit_size %>(n)
+ <% elsif dst_bit_size == pg_bit_size %>
+ *p = int<%= dst_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ <% else %>
+ *p = int<%= dst_bit_size %>(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ <% end %>
+
+ return nil
+}
+
+type scanPlanBinaryInt<%= pg_byte_size %>ToUint<%= dst_bit_size %> struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToUint<%= dst_bit_size %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for uint<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint<%= dst_bit_size %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint<%= dst_bit_size %>", n)
+ }
+ <% if dst_bit_size < pg_bit_size %>
+ if n > math.MaxUint<%= dst_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for uint<%= dst_bit_size %>", n)
+ }
+ <% end %>
+ *p = uint<%= dst_bit_size %>(n)
+
+ return nil
+}
+<% end %>
+
+<%# PostgreSQL binary format integer to Go machine integers %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ <% if 32 < pg_bit_size %>
+ n := int64(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", n)
+ } else if n > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", n)
+ }
+
+ *p = int(n)
+ <% else %>
+ *p = int(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ <% end %>
+
+ return nil
+}
+
+type scanPlanBinaryInt<%= pg_byte_size %>ToUint struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for uint<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+ <% if 32 < pg_bit_size %>
+ if uint64(n) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", n)
+ }
+ <% end %>
+ *p = uint(n)
+
+ return nil
+}
+
+<%# PostgreSQL binary format integer to Go Int64Scanner %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner struct{}
+
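+// Note: pgtype.Int8 follows PostgreSQL naming, where int8 means 8 bytes (64 bits),
+// so its Int64 field can carry any value scanned here without truncation.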
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+<%# PostgreSQL binary format integer to Go TextScanner %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+<% end %>
+
+<%# Any text to all integer types %>
+<% [
+ ["8", 8],
+ ["16", 16],
+ ["32", 32],
+ ["64", 64],
+ ["", 0]
+].each do |type_suffix, bit_size| %>
+type scanPlanTextAnyToInt<%= type_suffix %> struct{}
+
+func (scanPlanTextAnyToInt<%= type_suffix %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int<%= type_suffix %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, <%= bit_size %>)
+ if err != nil {
+ return err
+ }
+
+ *p = int<%= type_suffix %>(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint<%= type_suffix %> struct{}
+
+func (scanPlanTextAnyToUint<%= type_suffix %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint<%= type_suffix %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, <%= bit_size %>)
+ if err != nil {
+ return err
+ }
+
+ *p = uint<%= type_suffix %>(n)
+ return nil
+}
+<% end %>
+
+type scanPlanTextAnyToInt64Scanner struct{}
+
+func (scanPlanTextAnyToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ err = s.ScanInt64(Int8{Int64: n, Valid: true})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb
new file mode 100644
index 0000000..ac9a3f1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb
@@ -0,0 +1,93 @@
+package pgtype_test
+
+import (
+	"context"
+	"math"
+	"testing"
+
+	"github.com/jackc/pgx/v5/pgtype"
+	"github.com/jackc/pgx/v5/pgxtest"
+)
+
+<% [2, 4, 8].each do |pg_byte_size| %>
+<% pg_bit_size = pg_byte_size * 8 %>
+func TestInt<%= pg_byte_size %>Codec(t *testing.T) {
+ pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int<%= pg_byte_size %>", []pgxtest.ValueRoundTripTest{
+ {int8(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int16(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int32(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int64(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint8(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint16(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint32(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint64(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int32(-1), new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: -1, Valid: true})},
+ {1, new(int8), isExpectedEq(int8(1))},
+ {1, new(int16), isExpectedEq(int16(1))},
+ {1, new(int32), isExpectedEq(int32(1))},
+ {1, new(int64), isExpectedEq(int64(1))},
+ {1, new(uint8), isExpectedEq(uint8(1))},
+ {1, new(uint16), isExpectedEq(uint16(1))},
+ {1, new(uint32), isExpectedEq(uint32(1))},
+ {1, new(uint64), isExpectedEq(uint64(1))},
+ {1, new(int), isExpectedEq(int(1))},
+ {1, new(uint), isExpectedEq(uint(1))},
+ {-1, new(int8), isExpectedEq(int8(-1))},
+ {-1, new(int16), isExpectedEq(int16(-1))},
+ {-1, new(int32), isExpectedEq(int32(-1))},
+ {-1, new(int64), isExpectedEq(int64(-1))},
+ {-1, new(int), isExpectedEq(int(-1))},
+ {math.MinInt<%= pg_bit_size %>, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(math.MinInt<%= pg_bit_size %>))},
+ {-1, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(-1))},
+ {0, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(0))},
+ {1, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {math.MaxInt<%= pg_bit_size %>, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(math.MaxInt<%= pg_bit_size %>))},
+ {1, new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true})},
+ {"1", new(string), isExpectedEq("1")},
+ {pgtype.Int<%= pg_byte_size %>{}, new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{})},
+ {nil, new(*int<%= pg_bit_size %>), isExpectedEq((*int<%= pg_bit_size %>)(nil))},
+ })
+}
+
+func TestInt<%= pg_byte_size %>MarshalJSON(t *testing.T) {
+ successfulTests := []struct {
+ source pgtype.Int<%= pg_byte_size %>
+ result string
+ }{
+ {source: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 0}, result: "null"},
+ {source: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}, result: "1"},
+ }
+ for i, tt := range successfulTests {
+ r, err := tt.source.MarshalJSON()
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ }
+
+ if string(r) != tt.result {
+ t.Errorf("%d: expected %v to convert to %v, but it was %v", i, tt.source, tt.result, string(r))
+ }
+ }
+}
+
+func TestInt<%= pg_byte_size %>UnmarshalJSON(t *testing.T) {
+ successfulTests := []struct {
+ source string
+ result pgtype.Int<%= pg_byte_size %>
+ }{
+ {source: "null", result: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 0}},
+ {source: "1", result: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}},
+ }
+ for i, tt := range successfulTests {
+ var r pgtype.Int<%= pg_byte_size %>
+ err := r.UnmarshalJSON([]byte(tt.source))
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ }
+
+ if r != tt.result {
+ t.Errorf("%d: expected %v to convert to %v, but it was %v", i, tt.source, tt.result, r)
+ }
+ }
+}
+<% end %>
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb
new file mode 100644
index 0000000..6f40115
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb
@@ -0,0 +1,62 @@
+package pgtype_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/jackc/pgx/v5/pgtype/testutil"
+ "github.com/jackc/pgx/v5"
+)
+
+<%
+ [
+ ["int4", ["int16", "int32", "int64", "uint64", "pgtype.Int4"], [[1, 1], [1, 10], [10, 1], [100, 10]]],
+ ["numeric", ["int64", "float64", "pgtype.Numeric"], [[1, 1], [1, 10], [10, 1], [100, 10]]],
+ ].each do |pg_type, go_types, rows_columns|
+%>
+<% go_types.each do |go_type| %>
+<% rows_columns.each do |rows, columns| %>
+<% [["Text", "pgx.TextFormatCode"], ["Binary", "pgx.BinaryFormatCode"]].each do |format_name, format_code| %>
+func BenchmarkQuery<%= format_name %>FormatDecode_PG_<%= pg_type %>_to_Go_<%= go_type.gsub(/\W/, "_") %>_<%= rows %>_rows_<%= columns %>_columns(b *testing.B) {
+ defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
+ b.ResetTimer()
+ var v [<%= columns %>]<%= go_type %>
+ for i := 0; i < b.N; i++ {
+ rows, _ := conn.Query(
+ ctx,
+ `select <% columns.times do |col_idx| %><% if col_idx != 0 %>, <% end %>n::<%= pg_type %> + <%= col_idx%><% end %> from generate_series(1, <%= rows %>) n`,
+ pgx.QueryResultFormats{<%= format_code %>},
+ )
+ _, err := pgx.ForEachRow(rows, []any{<% columns.times do |col_idx| %><% if col_idx != 0 %>, <% end %>&v[<%= col_idx%>]<% end %>}, func() error { return nil })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+<% end %>
+<% end %>
+<% end %>
+<% end %>
+
+<% [10, 100, 1000].each do |array_size| %>
+<% [["Text", "pgx.TextFormatCode"], ["Binary", "pgx.BinaryFormatCode"]].each do |format_name, format_code| %>
+func BenchmarkQuery<%= format_name %>FormatDecode_PG_Int4Array_With_Go_Int4Array_<%= array_size %>(b *testing.B) {
+ defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
+ b.ResetTimer()
+ var v []int32
+ for i := 0; i < b.N; i++ {
+ rows, _ := conn.Query(
+ ctx,
+ `select array_agg(n) from generate_series(1, <%= array_size %>) n`,
+ pgx.QueryResultFormats{<%= format_code %>},
+ )
+ _, err := pgx.ForEachRow(rows, []any{&v}, func() error { return nil })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+<% end %>
+<% end %>
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh
new file mode 100644
index 0000000..22ac01a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh
@@ -0,0 +1,2 @@
+erb integration_benchmark_test.go.erb > integration_benchmark_test.go
+goimports -w integration_benchmark_test.go
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
new file mode 100644
index 0000000..4b51162
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
@@ -0,0 +1,297 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const (
+ microsecondsPerSecond = 1000000
+ microsecondsPerMinute = 60 * microsecondsPerSecond
+ microsecondsPerHour = 60 * microsecondsPerMinute
+ microsecondsPerDay = 24 * microsecondsPerHour
+ microsecondsPerMonth = 30 * microsecondsPerDay
+)
+
+type IntervalScanner interface {
+ ScanInterval(v Interval) error
+}
+
+type IntervalValuer interface {
+ IntervalValue() (Interval, error)
+}
+
+type Interval struct {
+ Microseconds int64
+ Days int32
+ Months int32
+ Valid bool
+}
+
+func (interval *Interval) ScanInterval(v Interval) error {
+ *interval = v
+ return nil
+}
+
+func (interval Interval) IntervalValue() (Interval, error) {
+ return interval, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (interval *Interval) Scan(src any) error {
+ if src == nil {
+ *interval = Interval{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToIntervalScanner{}.Scan([]byte(src), interval)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (interval Interval) Value() (driver.Value, error) {
+ if !interval.Valid {
+ return nil, nil
+ }
+
+ buf, err := IntervalCodec{}.PlanEncode(nil, 0, TextFormatCode, interval).Encode(interval, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type IntervalCodec struct{}
+
+func (IntervalCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (IntervalCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (IntervalCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(IntervalValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanIntervalCodecBinary{}
+ case TextFormatCode:
+ return encodePlanIntervalCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanIntervalCodecBinary struct{}
+
+func (encodePlanIntervalCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ interval, err := value.(IntervalValuer).IntervalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !interval.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt64(buf, interval.Microseconds)
+ buf = pgio.AppendInt32(buf, interval.Days)
+ buf = pgio.AppendInt32(buf, interval.Months)
+ return buf, nil
+}
+
+type encodePlanIntervalCodecText struct{}
+
+func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ interval, err := value.(IntervalValuer).IntervalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !interval.Valid {
+ return nil, nil
+ }
+
+ if interval.Months != 0 {
+ buf = append(buf, strconv.FormatInt(int64(interval.Months), 10)...)
+ buf = append(buf, " mon "...)
+ }
+
+ if interval.Days != 0 {
+ buf = append(buf, strconv.FormatInt(int64(interval.Days), 10)...)
+ buf = append(buf, " day "...)
+ }
+
+ absMicroseconds := interval.Microseconds
+ if absMicroseconds < 0 {
+ absMicroseconds = -absMicroseconds
+ buf = append(buf, '-')
+ }
+
+ hours := absMicroseconds / microsecondsPerHour
+ minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
+ seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
+
+ timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ buf = append(buf, timeStr...)
+
+ microseconds := absMicroseconds % microsecondsPerSecond
+ if microseconds != 0 {
+ buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
+ }
+
+ return buf, nil
+}
+
+func (IntervalCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case IntervalScanner:
+ return scanPlanBinaryIntervalToIntervalScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case IntervalScanner:
+ return scanPlanTextAnyToIntervalScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryIntervalToIntervalScanner struct{}
+
+func (scanPlanBinaryIntervalToIntervalScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(IntervalScanner)
+
+ if src == nil {
+ return scanner.ScanInterval(Interval{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("Received an invalid size for an interval: %d", len(src))
+ }
+
+ microseconds := int64(binary.BigEndian.Uint64(src))
+ days := int32(binary.BigEndian.Uint32(src[8:]))
+ months := int32(binary.BigEndian.Uint32(src[12:]))
+
+ return scanner.ScanInterval(Interval{Microseconds: microseconds, Days: days, Months: months, Valid: true})
+}
+
+type scanPlanTextAnyToIntervalScanner struct{}
+
+func (scanPlanTextAnyToIntervalScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(IntervalScanner)
+
+ if src == nil {
+ return scanner.ScanInterval(Interval{})
+ }
+
+ var microseconds int64
+ var days int32
+ var months int32
+
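+	// The text representation is a sequence of "<n> <unit>" pairs (year/years,
+	// mon/mons, day/days), optionally followed by a trailing hh:mm:ss[.ffffff]
+	// component, e.g. "1 year 2 mons 3 days 04:05:06.5". Parse the unit pairs
+	// first, then the time component if one is present.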
+ parts := strings.Split(string(src), " ")
+
+ for i := 0; i < len(parts)-1; i += 2 {
+ scalar, err := strconv.ParseInt(parts[i], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval format")
+ }
+
+ switch parts[i+1] {
+ case "year", "years":
+ months += int32(scalar * 12)
+ case "mon", "mons":
+ months += int32(scalar)
+ case "day", "days":
+ days = int32(scalar)
+ }
+ }
+
+ if len(parts)%2 == 1 {
+ timeParts := strings.SplitN(parts[len(parts)-1], ":", 3)
+ if len(timeParts) != 3 {
+ return fmt.Errorf("bad interval format")
+ }
+
+ var negative bool
+ if timeParts[0][0] == '-' {
+ negative = true
+ timeParts[0] = timeParts[0][1:]
+ }
+
+ hours, err := strconv.ParseInt(timeParts[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval hour format: %s", timeParts[0])
+ }
+
+ minutes, err := strconv.ParseInt(timeParts[1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval minute format: %s", timeParts[1])
+ }
+
+ sec, secFrac, secFracFound := strings.Cut(timeParts[2], ".")
+
+ seconds, err := strconv.ParseInt(sec, 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval second format: %s", sec)
+ }
+
+ var uSeconds int64
+ if secFracFound {
+ uSeconds, err = strconv.ParseInt(secFrac, 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval decimal format: %s", secFrac)
+ }
+
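+			// Scale the fraction to microseconds: a fraction like ".5" parses as 5
+			// and must be padded with zeros to become 500000.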
+ for i := 0; i < 6-len(secFrac); i++ {
+ uSeconds *= 10
+ }
+ }
+
+ microseconds = hours * microsecondsPerHour
+ microseconds += minutes * microsecondsPerMinute
+ microseconds += seconds * microsecondsPerSecond
+ microseconds += uSeconds
+
+ if negative {
+ microseconds = -microseconds
+ }
+ }
+
+ return scanner.ScanInterval(Interval{Months: months, Days: days, Microseconds: microseconds, Valid: true})
+}
+
+func (c IntervalCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c IntervalCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var interval Interval
+ err := codecScan(c, m, oid, format, src, &interval)
+ if err != nil {
+ return nil, err
+ }
+ return interval, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/json.go b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
new file mode 100644
index 0000000..60aa2b7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
@@ -0,0 +1,243 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type JSONCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*JSONCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*JSONCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch value.(type) {
+ case string:
+ return encodePlanJSONCodecEitherFormatString{}
+ case []byte:
+ return encodePlanJSONCodecEitherFormatByteSlice{}
+
+ // Handle json.RawMessage specifically because if it is run through json.Marshal it may be mutated.
+ // e.g. `{"foo": "bar"}` -> `{"foo":"bar"}`.
+ case json.RawMessage:
+ return encodePlanJSONCodecEitherFormatJSONRawMessage{}
+
+ // Cannot rely on driver.Valuer being handled later because anything can be marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1430
+ //
+ // Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to be used
+ // when both are implemented https://github.com/jackc/pgx/issues/1805
+ case driver.Valuer:
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+
+	// Must come before trying the wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
+ // marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1681
+ case json.Marshaler:
+ return &encodePlanJSONCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+ }
+
+ // Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
+ // appropriate wrappers here.
+ for _, f := range []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ } {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ return &encodePlanJSONCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+}
+
+// JSON needs its own scan plan for pointers to handle 'null'::json(b).
+// Consider making pointerPointerScanPlan more flexible in the future.
+type jsonPointerScanPlan struct {
+ next ScanPlan
+}
+
+func (p jsonPointerScanPlan) Scan(src []byte, dst any) error {
+ el := reflect.ValueOf(dst).Elem()
+ if src == nil || string(src) == "null" {
+ el.SetZero()
+ return nil
+ }
+
+ el.Set(reflect.New(el.Type().Elem()))
+ if p.next != nil {
+ return p.next.Scan(src, el.Interface())
+ }
+
+ return nil
+}
+
+type encodePlanJSONCodecEitherFormatString struct{}
+
+func (encodePlanJSONCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonString := value.(string)
+ buf = append(buf, jsonString...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatByteSlice struct{}
+
+func (encodePlanJSONCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes := value.([]byte)
+ if jsonBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatJSONRawMessage struct{}
+
+func (encodePlanJSONCodecEitherFormatJSONRawMessage) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes := value.(json.RawMessage)
+ if jsonBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatMarshal struct {
+ marshal func(v any) ([]byte, error)
+}
+
+func (e *encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes, err := e.marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+func (c *JSONCodec) PlanScan(m *Map, oid uint32, formatCode int16, target any) ScanPlan {
+ return c.planScan(m, oid, formatCode, target, 0)
+}
+
+// JSON cannot fall back to pointerPointerScanPlan because of 'null'::json(b),
+// so we need to duplicate the logic here.
+func (c *JSONCodec) planScan(m *Map, oid uint32, formatCode int16, target any, depth int) ScanPlan {
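+	// depth guards against unbounded recursion through chains of
+	// pointer-to-pointer scan targets.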
+ if depth > 8 {
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+ }
+
+ switch target.(type) {
+ case *string:
+ return &scanPlanAnyToString{}
+ case *[]byte:
+ return &scanPlanJSONToByteSlice{}
+ case BytesScanner:
+ return &scanPlanBinaryBytesToBytesScanner{}
+ case sql.Scanner:
+ return &scanPlanSQLScanner{formatCode: formatCode}
+ }
+
+ rv := reflect.ValueOf(target)
+ if rv.Kind() == reflect.Pointer && rv.Elem().Kind() == reflect.Pointer {
+ var plan jsonPointerScanPlan
+ plan.next = c.planScan(m, oid, formatCode, rv.Elem().Interface(), depth+1)
+ return plan
+ } else {
+ return &scanPlanJSONToJSONUnmarshal{unmarshal: c.Unmarshal}
+ }
+}
+
+type scanPlanAnyToString struct{}
+
+func (scanPlanAnyToString) Scan(src []byte, dst any) error {
+ p := dst.(*string)
+ *p = string(src)
+ return nil
+}
+
+type scanPlanJSONToByteSlice struct{}
+
+func (scanPlanJSONToByteSlice) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanJSONToJSONUnmarshal struct {
+ unmarshal func(data []byte, v any) error
+}
+
+func (s *scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
+ if src == nil {
+ dstValue := reflect.ValueOf(dst)
+ if dstValue.Kind() == reflect.Ptr {
+ el := dstValue.Elem()
+ switch el.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface:
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ v := reflect.ValueOf(dst)
+ if v.Kind() != reflect.Pointer || v.IsNil() {
+ return fmt.Errorf("cannot scan into non-pointer or nil destinations %T", dst)
+ }
+
+ elem := v.Elem()
+ elem.Set(reflect.Zero(elem.Type()))
+
+ return s.unmarshal(src, dst)
+}
+
+func (c *JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+}
+
+func (c *JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go
new file mode 100644
index 0000000..4d4eb58
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go
@@ -0,0 +1,129 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+type JSONBCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*JSONBCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*JSONBCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, TextFormatCode, value)
+ if plan != nil {
+ return &encodePlanJSONBCodecBinaryWrapper{textPlan: plan}
+ }
+ case TextFormatCode:
+ return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, format, value)
+ }
+
+ return nil
+}
+
+type encodePlanJSONBCodecBinaryWrapper struct {
+ textPlan EncodePlan
+}
+
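+// The binary wire format for jsonb is a 1-byte version header (currently 1)
+// followed by the JSON text, so the binary encode plan simply prefixes the
+// text encoding with that version byte.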
+func (plan *encodePlanJSONBCodecBinaryWrapper) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ buf = append(buf, 1)
+ return plan.textPlan.Encode(value, buf)
+}
+
+func (c *JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, TextFormatCode, target)
+ if plan != nil {
+ return &scanPlanJSONBCodecBinaryUnwrapper{textPlan: plan}
+ }
+ case TextFormatCode:
+ return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, format, target)
+ }
+
+ return nil
+}
+
+type scanPlanJSONBCodecBinaryUnwrapper struct {
+ textPlan ScanPlan
+}
+
+func (plan *scanPlanJSONBCodecBinaryUnwrapper) Scan(src []byte, dst any) error {
+ if src == nil {
+ return plan.textPlan.Scan(src, dst)
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ return plan.textPlan.Scan(src[1:], dst)
+}
+
+func (c *JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ if len(src) == 0 {
+ return nil, fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return nil, fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ dstBuf := make([]byte, len(src)-1)
+ copy(dstBuf, src[1:])
+ return dstBuf, nil
+ case TextFormatCode:
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code: %v", format)
+ }
+}
+
+func (c *JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ if len(src) == 0 {
+ return nil, fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return nil, fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ src = src[1:]
+ case TextFormatCode:
+ default:
+ return nil, fmt.Errorf("unknown format code: %v", format)
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/line.go b/vendor/github.com/jackc/pgx/v5/pgtype/line.go
new file mode 100644
index 0000000..4ae8003
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/line.go
@@ -0,0 +1,225 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type LineScanner interface {
+ ScanLine(v Line) error
+}
+
+type LineValuer interface {
+ LineValue() (Line, error)
+}
+
+type Line struct {
+ A, B, C float64
+ Valid bool
+}
+
+func (line *Line) ScanLine(v Line) error {
+ *line = v
+ return nil
+}
+
+func (line Line) LineValue() (Line, error) {
+ return line, nil
+}
+
+func (line *Line) Set(src any) error {
+ return fmt.Errorf("cannot convert %v to Line", src)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (line *Line) Scan(src any) error {
+ if src == nil {
+ *line = Line{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToLineScanner{}.Scan([]byte(src), line)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (line Line) Value() (driver.Value, error) {
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf, err := LineCodec{}.PlanEncode(nil, 0, TextFormatCode, line).Encode(line, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type LineCodec struct{}
+
+func (LineCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (LineCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (LineCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(LineValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanLineCodecBinary{}
+ case TextFormatCode:
+ return encodePlanLineCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanLineCodecBinary struct{}
+
+func (encodePlanLineCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ line, err := value.(LineValuer).LineValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.A))
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.B))
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.C))
+ return buf, nil
+}
+
+type encodePlanLineCodecText struct{}
+
+func (encodePlanLineCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ line, err := value.(LineValuer).LineValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`{%s,%s,%s}`,
+ strconv.FormatFloat(line.A, 'f', -1, 64),
+ strconv.FormatFloat(line.B, 'f', -1, 64),
+ strconv.FormatFloat(line.C, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (LineCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case LineScanner:
+ return scanPlanBinaryLineToLineScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case LineScanner:
+ return scanPlanTextAnyToLineScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLineToLineScanner struct{}
+
+func (scanPlanBinaryLineToLineScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LineScanner)
+
+ if src == nil {
+ return scanner.ScanLine(Line{})
+ }
+
+ if len(src) != 24 {
+ return fmt.Errorf("invalid length for line: %v", len(src))
+ }
+
+ a := binary.BigEndian.Uint64(src)
+ b := binary.BigEndian.Uint64(src[8:])
+ c := binary.BigEndian.Uint64(src[16:])
+
+ return scanner.ScanLine(Line{
+ A: math.Float64frombits(a),
+ B: math.Float64frombits(b),
+ C: math.Float64frombits(c),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToLineScanner struct{}
+
+func (scanPlanTextAnyToLineScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LineScanner)
+
+ if src == nil {
+ return scanner.ScanLine(Line{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for line: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 3)
+ if len(parts) < 3 {
+ return fmt.Errorf("invalid format for line")
+ }
+
+ a, err := strconv.ParseFloat(parts[0], 64)
+ if err != nil {
+ return err
+ }
+
+ b, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return err
+ }
+
+ c, err := strconv.ParseFloat(parts[2], 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanLine(Line{A: a, B: b, C: c, Valid: true})
+}
+
+func (c LineCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c LineCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var line Line
+ err := codecScan(c, m, oid, format, src, &line)
+ if err != nil {
+ return nil, err
+ }
+ return line, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go b/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go
new file mode 100644
index 0000000..05a86e1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go
@@ -0,0 +1,238 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type LsegScanner interface {
+ ScanLseg(v Lseg) error
+}
+
+type LsegValuer interface {
+ LsegValue() (Lseg, error)
+}
+
+type Lseg struct {
+ P [2]Vec2
+ Valid bool
+}
+
+func (lseg *Lseg) ScanLseg(v Lseg) error {
+ *lseg = v
+ return nil
+}
+
+func (lseg Lseg) LsegValue() (Lseg, error) {
+ return lseg, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (lseg *Lseg) Scan(src any) error {
+ if src == nil {
+ *lseg = Lseg{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToLsegScanner{}.Scan([]byte(src), lseg)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (lseg Lseg) Value() (driver.Value, error) {
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf, err := LsegCodec{}.PlanEncode(nil, 0, TextFormatCode, lseg).Encode(lseg, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type LsegCodec struct{}
+
+func (LsegCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (LsegCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (LsegCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(LsegValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanLsegCodecBinary{}
+ case TextFormatCode:
+ return encodePlanLsegCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanLsegCodecBinary struct{}
+
+func (encodePlanLsegCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ lseg, err := value.(LsegValuer).LsegValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[1].Y))
+ return buf, nil
+}
+
+type encodePlanLsegCodecText struct{}
+
+func (encodePlanLsegCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ lseg, err := value.(LsegValuer).LsegValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`[(%s,%s),(%s,%s)]`,
+ strconv.FormatFloat(lseg.P[0].X, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[0].Y, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[1].X, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[1].Y, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (LsegCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case LsegScanner:
+ return scanPlanBinaryLsegToLsegScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case LsegScanner:
+ return scanPlanTextAnyToLsegScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLsegToLsegScanner struct{}
+
+func (scanPlanBinaryLsegToLsegScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LsegScanner)
+
+ if src == nil {
+ return scanner.ScanLseg(Lseg{})
+ }
+
+ if len(src) != 32 {
+ return fmt.Errorf("invalid length for lseg: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ return scanner.ScanLseg(Lseg{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToLsegScanner struct{}
+
+func (scanPlanTextAnyToLsegScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LsegScanner)
+
+ if src == nil {
+ return scanner.ScanLseg(Lseg{})
+ }
+
+ if len(src) < 11 {
+ return fmt.Errorf("invalid length for lseg: %v", len(src))
+ }
+
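+	// The text format is [(x1,y1),(x2,y2)]. Skip the leading "[(" and peel off
+	// the four coordinates by scanning for the ',' and ')' delimiters.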
+ str := string(src[2:])
+
+ var end int
+ end = strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-2]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanLseg(Lseg{P: [2]Vec2{{x1, y1}, {x2, y2}}, Valid: true})
+}
+
+func (c LsegCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c LsegCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var lseg Lseg
+ err := codecScan(c, m, oid, format, src, &lseg)
+ if err != nil {
+ return nil, err
+ }
+ return lseg, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
new file mode 100644
index 0000000..6af3177
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
@@ -0,0 +1,122 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+type LtreeCodec struct{}
+
+func (l LtreeCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+// PreferredFormat returns the preferred format.
+func (l LtreeCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+// PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+// found then nil is returned.
+func (l LtreeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode:
+ return (TextCodec)(l).PlanEncode(m, oid, format, value)
+ case BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodeLtreeCodecBinaryString{}
+ case []byte:
+ return encodeLtreeCodecBinaryByteSlice{}
+ case TextValuer:
+ return encodeLtreeCodecBinaryTextValuer{}
+ }
+ }
+
+ return nil
+}
+
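+// In binary format an ltree value is a 1-byte protocol version (currently 1)
+// followed by the label path as text. The encode plans below prepend that
+// version byte and the scan plans validate and strip it.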
+type encodeLtreeCodecBinaryString struct{}
+
+func (encodeLtreeCodecBinaryString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ltree := value.(string)
+ buf = append(buf, 1)
+ return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryByteSlice struct{}
+
+func (encodeLtreeCodecBinaryByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ltree := value.([]byte)
+ buf = append(buf, 1)
+ return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryTextValuer struct{}
+
+func (encodeLtreeCodecBinaryTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, 1)
+ return append(buf, t.String...), nil
+}
+
+// PlanScan returns a ScanPlan for scanning a PostgreSQL value into a destination with the same type as target. If
+// no plan can be found then nil is returned.
+func (l LtreeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode:
+ return (TextCodec)(l).PlanScan(m, oid, format, target)
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanBinaryLtreeToString{}
+ case TextScanner:
+ return scanPlanBinaryLtreeToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLtreeToString struct{}
+
+func (scanPlanBinaryLtreeToString) Scan(src []byte, target any) error {
+ version := src[0]
+ if version != 1 {
+ return fmt.Errorf("unsupported ltree version %d", version)
+ }
+
+ p := (target).(*string)
+ *p = string(src[1:])
+
+ return nil
+}
+
+type scanPlanBinaryLtreeToTextScanner struct{}
+
+func (scanPlanBinaryLtreeToTextScanner) Scan(src []byte, target any) error {
+ version := src[0]
+ if version != 1 {
+ return fmt.Errorf("unsupported ltree version %d", version)
+ }
+
+ scanner := (target).(TextScanner)
+ return scanner.ScanText(Text{String: string(src[1:]), Valid: true})
+}
+
+// DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
+func (l LtreeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return (TextCodec)(l).DecodeDatabaseSQLValue(m, oid, format, src)
+}
+
+// DecodeValue returns src decoded into its default format.
+func (l LtreeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ return (TextCodec)(l).DecodeValue(m, oid, format, src)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go b/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go
new file mode 100644
index 0000000..e913ec9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+)
+
+type MacaddrCodec struct{}
+
+func (MacaddrCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (MacaddrCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (MacaddrCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case net.HardwareAddr:
+ return encodePlanMacaddrCodecBinaryHardwareAddr{}
+ case TextValuer:
+ return encodePlanMacAddrCodecTextValuer{}
+
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case net.HardwareAddr:
+ return encodePlanMacaddrCodecTextHardwareAddr{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanMacaddrCodecBinaryHardwareAddr struct{}
+
+func (encodePlanMacaddrCodecBinaryHardwareAddr) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ addr := value.(net.HardwareAddr)
+ if addr == nil {
+ return nil, nil
+ }
+
+ return append(buf, addr...), nil
+}
+
+type encodePlanMacAddrCodecTextValuer struct{}
+
+func (encodePlanMacAddrCodecTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ addr, err := net.ParseMAC(t.String)
+ if err != nil {
+ return nil, err
+ }
+
+ return append(buf, addr...), nil
+}
+
+type encodePlanMacaddrCodecTextHardwareAddr struct{}
+
+func (encodePlanMacaddrCodecTextHardwareAddr) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ addr := value.(net.HardwareAddr)
+ if addr == nil {
+ return nil, nil
+ }
+
+ return append(buf, addr.String()...), nil
+}
+
+func (MacaddrCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *net.HardwareAddr:
+ return scanPlanBinaryMacaddrToHardwareAddr{}
+ case TextScanner:
+ return scanPlanBinaryMacaddrToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *net.HardwareAddr:
+ return scanPlanTextMacaddrToHardwareAddr{}
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryMacaddrToHardwareAddr struct{}
+
+func (scanPlanBinaryMacaddrToHardwareAddr) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*net.HardwareAddr)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanBinaryMacaddrToTextScanner struct{}
+
+func (scanPlanBinaryMacaddrToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: net.HardwareAddr(src).String(), Valid: true})
+}
+
+type scanPlanTextMacaddrToHardwareAddr struct{}
+
+func (scanPlanTextMacaddrToHardwareAddr) Scan(src []byte, dst any) error {
+ p := dst.(*net.HardwareAddr)
+
+ if src == nil {
+ *p = nil
+ return nil
+ }
+
+ addr, err := net.ParseMAC(string(src))
+ if err != nil {
+ return err
+ }
+
+ *p = addr
+
+ return nil
+}
+
+func (c MacaddrCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c MacaddrCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var addr net.HardwareAddr
+ err := codecScan(c, m, oid, format, src, &addr)
+ if err != nil {
+ return nil, err
+ }
+ return addr, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go b/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go
new file mode 100644
index 0000000..e576378
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go
@@ -0,0 +1,443 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// MultirangeGetter is a type that can be converted into a PostgreSQL multirange.
+type MultirangeGetter interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // Len returns the number of elements in the multirange.
+ Len() int
+
+ // Index returns the element at i.
+ Index(i int) any
+
+ // IndexType returns a non-nil scan target of the type Index will return. This is used by MultirangeCodec.PlanEncode.
+ IndexType() any
+}
+
+// MultirangeSetter is a type that can be set from a PostgreSQL multirange.
+type MultirangeSetter interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // SetLen prepares the value such that ScanIndex can be called for each element. This will remove any existing
+ // elements.
+ SetLen(n int) error
+
+ // ScanIndex returns a value usable as a scan target for i. SetLen must be called before ScanIndex.
+ ScanIndex(i int) any
+
+ // ScanIndexType returns a non-nil scan target of the type ScanIndex will return. This is used by
+ // MultirangeCodec.PlanScan.
+ ScanIndexType() any
+}
+
+// MultirangeCodec is a codec for any multirange type.
+type MultirangeCodec struct {
+ ElementType *Type
+}
+
+func (c *MultirangeCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *MultirangeCodec) PreferredFormat() int16 {
+ return c.ElementType.Codec.PreferredFormat()
+}
+
+func (c *MultirangeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ multirangeValuer, ok := value.(MultirangeGetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := multirangeValuer.IndexType()
+
+ elementEncodePlan := m.PlanEncode(c.ElementType.OID, format, elementType)
+ if elementEncodePlan == nil {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanMultirangeCodecBinary{ac: c, m: m, oid: oid}
+ case TextFormatCode:
+ return &encodePlanMultirangeCodecText{ac: c, m: m, oid: oid}
+ }
+
+ return nil
+}
+
+type encodePlanMultirangeCodecText struct {
+ ac *MultirangeCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanMultirangeCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ multirange := value.(MultirangeGetter)
+
+ if multirange.IsNull() {
+ return nil, nil
+ }
+
+ elementCount := multirange.Len()
+
+ buf = append(buf, '{')
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ inElemBuf := make([]byte, 0, 32)
+ for i := 0; i < elementCount; i++ {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ elem := multirange.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, TextFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", multirange.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ return nil, fmt.Errorf("multirange cannot contain NULL element")
+ } else {
+ buf = append(buf, elemBuf...)
+ }
+ }
+
+ buf = append(buf, '}')
+
+ return buf, nil
+}
+
+type encodePlanMultirangeCodecBinary struct {
+ ac *MultirangeCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanMultirangeCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ multirange := value.(MultirangeGetter)
+
+ if multirange.IsNull() {
+ return nil, nil
+ }
+
+ elementCount := multirange.Len()
+
+ buf = pgio.AppendInt32(buf, int32(elementCount))
+
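+	// Each element is written as a 4-byte length followed by its binary
+	// encoding. Reserve the length with a -1 placeholder, encode the element,
+	// then back-patch the real length with pgio.SetInt32.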
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ for i := 0; i < elementCount; i++ {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elem := multirange.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, BinaryFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", multirange.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ return nil, fmt.Errorf("multirange cannot contain NULL element")
+ } else {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+func (c *MultirangeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ multirangeScanner, ok := target.(MultirangeSetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := multirangeScanner.ScanIndexType()
+
+ elementScanPlan := m.PlanScan(c.ElementType.OID, format, elementType)
+ if _, ok := elementScanPlan.(*scanPlanFail); ok {
+ return nil
+ }
+
+ return &scanPlanMultirangeCodec{
+ multirangeCodec: c,
+ m: m,
+ oid: oid,
+ formatCode: format,
+ }
+}
+
+func (c *MultirangeCodec) decodeBinary(m *Map, multirangeOID uint32, src []byte, multirange MultirangeSetter) error {
+ rp := 0
+
+ elementCount := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ err := multirange.SetLen(elementCount)
+ if err != nil {
+ return err
+ }
+
+ if elementCount == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, BinaryFormatCode, multirange.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, BinaryFormatCode, multirange.ScanIndex(0))
+ }
+
+ for i := 0; i < elementCount; i++ {
+ elem := multirange.ScanIndex(i)
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return fmt.Errorf("failed to scan multirange element %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (c *MultirangeCodec) decodeText(m *Map, multirangeOID uint32, src []byte, multirange MultirangeSetter) error {
+ elements, err := parseUntypedTextMultirange(src)
+ if err != nil {
+ return err
+ }
+
+ err = multirange.SetLen(len(elements))
+ if err != nil {
+ return err
+ }
+
+ if len(elements) == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, TextFormatCode, multirange.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, multirange.ScanIndex(0))
+ }
+
+ for i, s := range elements {
+ elem := multirange.ScanIndex(i)
+ err = elementScanPlan.Scan([]byte(s), elem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type scanPlanMultirangeCodec struct {
+ multirangeCodec *MultirangeCodec
+ m *Map
+ oid uint32
+ formatCode int16
+ elementScanPlan ScanPlan
+}
+
+func (spac *scanPlanMultirangeCodec) Scan(src []byte, dst any) error {
+ c := spac.multirangeCodec
+ m := spac.m
+ oid := spac.oid
+ formatCode := spac.formatCode
+
+ multirange := dst.(MultirangeSetter)
+
+ if src == nil {
+ return multirange.ScanNull()
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ return c.decodeBinary(m, oid, src, multirange)
+ case TextFormatCode:
+ return c.decodeText(m, oid, src, multirange)
+ default:
+ return fmt.Errorf("unknown format code %d", formatCode)
+ }
+}
+
+func (c *MultirangeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *MultirangeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var multirange Multirange[Range[any]]
+ err := m.PlanScan(oid, format, &multirange).Scan(src, &multirange)
+ return multirange, err
+}
+
+func parseUntypedTextMultirange(src []byte) ([]string, error) {
+ elements := make([]string, 0)
+
+ buf := bytes.NewBuffer(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != '{' {
+ return nil, fmt.Errorf("invalid multirange, expected '{' got %v", r)
+ }
+
+parseValueLoop:
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid multirange: %w", err)
+ }
+
+ switch r {
+ case ',': // skip range separator
+ case '}':
+ break parseValueLoop
+ default:
+ buf.UnreadRune()
+ value, err := parseRange(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid multirange value: %w", err)
+ }
+ elements = append(elements, value)
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ return elements, nil
+}
+
+func parseRange(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ boundSepRead := false
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case ',', '}':
+ if r == ',' && !boundSepRead {
+ boundSepRead = true
+ break
+ }
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+// Multirange is a generic multirange type.
+//
+// T should implement RangeValuer and *T should implement RangeScanner. However, there does not appear to be a way to
+// enforce the RangeScanner constraint.
+type Multirange[T RangeValuer] []T
+
+func (r Multirange[T]) IsNull() bool {
+ return r == nil
+}
+
+func (r Multirange[T]) Len() int {
+ return len(r)
+}
+
+func (r Multirange[T]) Index(i int) any {
+ return r[i]
+}
+
+func (r Multirange[T]) IndexType() any {
+ var zero T
+ return zero
+}
+
+func (r *Multirange[T]) ScanNull() error {
+ *r = nil
+ return nil
+}
+
+func (r *Multirange[T]) SetLen(n int) error {
+ *r = make([]T, n)
+ return nil
+}
+
+func (r Multirange[T]) ScanIndex(i int) any {
+ return &r[i]
+}
+
+func (r Multirange[T]) ScanIndexType() any {
+ return new(T)
+}
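
As a minimal usage sketch (assuming a pgx v5 connection named conn and the int4multirange literal shown in the query), the generic Multirange type above can be scanned straight from a multirange column; each element is a Range value with Lower/Upper bounds:

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func example(ctx context.Context, conn *pgx.Conn) error {
	// Scan an int4multirange into the generic Multirange type defined above.
	var mr pgtype.Multirange[pgtype.Range[pgtype.Int4]]
	if err := conn.QueryRow(ctx, "select '{[1,5),[10,20)}'::int4multirange").Scan(&mr); err != nil {
		return err
	}
	for _, r := range mr {
		fmt.Println(r.Lower.Int32, r.Upper.Int32) // bounds of each contained range
	}
	return nil
}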
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go b/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go
new file mode 100644
index 0000000..4dbec78
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go
@@ -0,0 +1,823 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// PostgreSQL internal numeric storage uses 16-bit "digits" with base of 10,000
+const nbase = 10000
+
+const (
+ pgNumericNaN = 0x00000000c0000000
+ pgNumericNaNSign = 0xc000
+
+ pgNumericPosInf = 0x00000000d0000000
+ pgNumericPosInfSign = 0xd000
+
+ pgNumericNegInf = 0x00000000f0000000
+ pgNumericNegInfSign = 0xf000
+)
+
+var big0 *big.Int = big.NewInt(0)
+var big1 *big.Int = big.NewInt(1)
+var big10 *big.Int = big.NewInt(10)
+var big100 *big.Int = big.NewInt(100)
+var big1000 *big.Int = big.NewInt(1000)
+
+var bigNBase *big.Int = big.NewInt(nbase)
+var bigNBaseX2 *big.Int = big.NewInt(nbase * nbase)
+var bigNBaseX3 *big.Int = big.NewInt(nbase * nbase * nbase)
+var bigNBaseX4 *big.Int = big.NewInt(nbase * nbase * nbase * nbase)
+
+type NumericScanner interface {
+ ScanNumeric(v Numeric) error
+}
+
+type NumericValuer interface {
+ NumericValue() (Numeric, error)
+}
+
+type Numeric struct {
+ Int *big.Int
+ Exp int32
+ NaN bool
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (n *Numeric) ScanNumeric(v Numeric) error {
+ *n = v
+ return nil
+}
+
+func (n Numeric) NumericValue() (Numeric, error) {
+ return n, nil
+}
+
+func (n Numeric) Float64Value() (Float8, error) {
+ if !n.Valid {
+ return Float8{}, nil
+ } else if n.NaN {
+ return Float8{Float64: math.NaN(), Valid: true}, nil
+ } else if n.InfinityModifier == Infinity {
+ return Float8{Float64: math.Inf(1), Valid: true}, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ return Float8{Float64: math.Inf(-1), Valid: true}, nil
+ }
+
+ buf := make([]byte, 0, 32)
+
+ if n.Int == nil {
+ buf = append(buf, '0')
+ } else {
+ buf = append(buf, n.Int.String()...)
+ }
+ buf = append(buf, 'e')
+ buf = append(buf, strconv.FormatInt(int64(n.Exp), 10)...)
+
+ f, err := strconv.ParseFloat(string(buf), 64)
+ if err != nil {
+ return Float8{}, err
+ }
+
+ return Float8{Float64: f, Valid: true}, nil
+}
+
+func (n *Numeric) ScanInt64(v Int8) error {
+ if !v.Valid {
+ *n = Numeric{}
+ return nil
+ }
+
+ *n = Numeric{Int: big.NewInt(v.Int64), Valid: true}
+ return nil
+}
+
+func (n Numeric) Int64Value() (Int8, error) {
+ if !n.Valid {
+ return Int8{}, nil
+ }
+
+ bi, err := n.toBigInt()
+ if err != nil {
+ return Int8{}, err
+ }
+
+ if !bi.IsInt64() {
+ return Int8{}, fmt.Errorf("cannot convert %v to int64", n)
+ }
+
+ return Int8{Int64: bi.Int64(), Valid: true}, nil
+}
+
+func (n *Numeric) ScanScientific(src string) error {
+ if !strings.ContainsAny("eE", src) {
+ return scanPlanTextAnyToNumericScanner{}.Scan([]byte(src), n)
+ }
+
+ if bigF, ok := new(big.Float).SetString(string(src)); ok {
+ smallF, _ := bigF.Float64()
+ src = strconv.FormatFloat(smallF, 'f', -1, 64)
+ }
+
+ num, exp, err := parseNumericString(src)
+ if err != nil {
+ return err
+ }
+
+ *n = Numeric{Int: num, Exp: exp, Valid: true}
+
+ return nil
+}
+
+func (n *Numeric) toBigInt() (*big.Int, error) {
+ if n.Exp == 0 {
+ return n.Int, nil
+ }
+
+ num := &big.Int{}
+ num.Set(n.Int)
+ if n.Exp > 0 {
+ mul := &big.Int{}
+ mul.Exp(big10, big.NewInt(int64(n.Exp)), nil)
+ num.Mul(num, mul)
+ return num, nil
+ }
+
+ div := &big.Int{}
+ div.Exp(big10, big.NewInt(int64(-n.Exp)), nil)
+ remainder := &big.Int{}
+ num.DivMod(num, div, remainder)
+ if remainder.Cmp(big0) != 0 {
+ return nil, fmt.Errorf("cannot convert %v to integer", n)
+ }
+ return num, nil
+}
+
+func parseNumericString(str string) (n *big.Int, exp int32, err error) {
+ idx := strings.IndexByte(str, '.')
+
+ if idx == -1 {
+ for len(str) > 1 && str[len(str)-1] == '0' && str[len(str)-2] != '-' {
+ str = str[:len(str)-1]
+ exp++
+ }
+ } else {
+ exp = int32(-(len(str) - idx - 1))
+ str = str[:idx] + str[idx+1:]
+ }
+
+ accum := &big.Int{}
+ if _, ok := accum.SetString(str, 10); !ok {
+ return nil, 0, fmt.Errorf("%s is not a number", str)
+ }
+
+ return accum, exp, nil
+}
+
+func nbaseDigitsToInt64(src []byte) (accum int64, bytesRead, digitsRead int) {
+ digits := len(src) / 2
+ if digits > 4 {
+ digits = 4
+ }
+
+ rp := 0
+
+ for i := 0; i < digits; i++ {
+ if i > 0 {
+ accum *= nbase
+ }
+ accum += int64(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return accum, rp, digits
+}
+
+// Scan implements the database/sql Scanner interface.
+func (n *Numeric) Scan(src any) error {
+ if src == nil {
+ *n = Numeric{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToNumericScanner{}.Scan([]byte(src), n)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (n Numeric) Value() (driver.Value, error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ buf, err := NumericCodec{}.PlanEncode(nil, 0, TextFormatCode, n).Encode(n, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+func (n Numeric) MarshalJSON() ([]byte, error) {
+ if !n.Valid {
+ return []byte("null"), nil
+ }
+
+ if n.NaN {
+ return []byte(`"NaN"`), nil
+ }
+
+ return n.numberTextBytes(), nil
+}
+
+func (n *Numeric) UnmarshalJSON(src []byte) error {
+ if bytes.Equal(src, []byte(`null`)) {
+ *n = Numeric{}
+ return nil
+ }
+ if bytes.Equal(src, []byte(`"NaN"`)) {
+ *n = Numeric{NaN: true, Valid: true}
+ return nil
+ }
+ return scanPlanTextAnyToNumericScanner{}.Scan(src, n)
+}
+
+// numberTextBytes returns the text representation of the number. Undefined if NaN, infinite, or NULL.
+func (n Numeric) numberTextBytes() []byte {
+ intStr := n.Int.String()
+
+ buf := &bytes.Buffer{}
+
+ if len(intStr) > 0 && intStr[:1] == "-" {
+ intStr = intStr[1:]
+ buf.WriteByte('-')
+ }
+
+ exp := int(n.Exp)
+ if exp > 0 {
+ buf.WriteString(intStr)
+ for i := 0; i < exp; i++ {
+ buf.WriteByte('0')
+ }
+ } else if exp < 0 {
+ if len(intStr) <= -exp {
+ buf.WriteString("0.")
+ leadingZeros := -exp - len(intStr)
+ for i := 0; i < leadingZeros; i++ {
+ buf.WriteByte('0')
+ }
+ buf.WriteString(intStr)
+ } else if len(intStr) > -exp {
+ dpPos := len(intStr) + exp
+ buf.WriteString(intStr[:dpPos])
+ buf.WriteByte('.')
+ buf.WriteString(intStr[dpPos:])
+ }
+ } else {
+ buf.WriteString(intStr)
+ }
+
+ return buf.Bytes()
+}
+
+type NumericCodec struct{}
+
+func (NumericCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (NumericCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (NumericCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case NumericValuer:
+ return encodePlanNumericCodecBinaryNumericValuer{}
+ case Float64Valuer:
+ return encodePlanNumericCodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanNumericCodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case NumericValuer:
+ return encodePlanNumericCodecTextNumericValuer{}
+ case Float64Valuer:
+ return encodePlanNumericCodecTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanNumericCodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanNumericCodecBinaryNumericValuer struct{}
+
+func (encodePlanNumericCodecBinaryNumericValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(NumericValuer).NumericValue()
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericBinary(n, buf)
+}
+
+type encodePlanNumericCodecBinaryFloat64Valuer struct{}
+
+func (encodePlanNumericCodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if math.IsNaN(n.Float64) {
+ return encodeNumericBinary(Numeric{NaN: true, Valid: true}, buf)
+ } else if math.IsInf(n.Float64, 1) {
+ return encodeNumericBinary(Numeric{InfinityModifier: Infinity, Valid: true}, buf)
+ } else if math.IsInf(n.Float64, -1) {
+ return encodeNumericBinary(Numeric{InfinityModifier: NegativeInfinity, Valid: true}, buf)
+ }
+ num, exp, err := parseNumericString(strconv.FormatFloat(n.Float64, 'f', -1, 64))
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericBinary(Numeric{Int: num, Exp: exp, Valid: true}, buf)
+}
+
+type encodePlanNumericCodecBinaryInt64Valuer struct{}
+
+func (encodePlanNumericCodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return encodeNumericBinary(Numeric{Int: big.NewInt(n.Int64), Valid: true}, buf)
+}
+
+func encodeNumericBinary(n Numeric, buf []byte) (newBuf []byte, err error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.NaN {
+ buf = pgio.AppendUint64(buf, pgNumericNaN)
+ return buf, nil
+ } else if n.InfinityModifier == Infinity {
+ buf = pgio.AppendUint64(buf, pgNumericPosInf)
+ return buf, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ buf = pgio.AppendUint64(buf, pgNumericNegInf)
+ return buf, nil
+ }
+
+ var sign int16
+ if n.Int.Cmp(big0) < 0 {
+ sign = 16384
+ }
+
+ absInt := &big.Int{}
+ wholePart := &big.Int{}
+ fracPart := &big.Int{}
+ remainder := &big.Int{}
+ absInt.Abs(n.Int)
+
+ // Normalize absInt and exp to where exp is always a multiple of 4. This makes
+ // converting to 16-bit base 10,000 digits easier.
+ var exp int32
+ switch n.Exp % 4 {
+ case 1, -3:
+ exp = n.Exp - 1
+ absInt.Mul(absInt, big10)
+ case 2, -2:
+ exp = n.Exp - 2
+ absInt.Mul(absInt, big100)
+ case 3, -1:
+ exp = n.Exp - 3
+ absInt.Mul(absInt, big1000)
+ default:
+ exp = n.Exp
+ }
+
+ if exp < 0 {
+ divisor := &big.Int{}
+ divisor.Exp(big10, big.NewInt(int64(-exp)), nil)
+ wholePart.DivMod(absInt, divisor, fracPart)
+ fracPart.Add(fracPart, divisor)
+ } else {
+ wholePart = absInt
+ }
+
+ var wholeDigits, fracDigits []int16
+
+ for wholePart.Cmp(big0) != 0 {
+ wholePart.DivMod(wholePart, bigNBase, remainder)
+ wholeDigits = append(wholeDigits, int16(remainder.Int64()))
+ }
+
+ if fracPart.Cmp(big0) != 0 {
+ for fracPart.Cmp(big1) != 0 {
+ fracPart.DivMod(fracPart, bigNBase, remainder)
+ fracDigits = append(fracDigits, int16(remainder.Int64()))
+ }
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(wholeDigits)+len(fracDigits)))
+
+ var weight int16
+ if len(wholeDigits) > 0 {
+ weight = int16(len(wholeDigits) - 1)
+ if exp > 0 {
+ weight += int16(exp / 4)
+ }
+ } else {
+ weight = int16(exp/4) - 1 + int16(len(fracDigits))
+ }
+ buf = pgio.AppendInt16(buf, weight)
+
+ buf = pgio.AppendInt16(buf, sign)
+
+ var dscale int16
+ if n.Exp < 0 {
+ dscale = int16(-n.Exp)
+ }
+ buf = pgio.AppendInt16(buf, dscale)
+
+ for i := len(wholeDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, wholeDigits[i])
+ }
+
+ for i := len(fracDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, fracDigits[i])
+ }
+
+ return buf, nil
+}
+
+type encodePlanNumericCodecTextNumericValuer struct{}
+
+func (encodePlanNumericCodecTextNumericValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(NumericValuer).NumericValue()
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericText(n, buf)
+}
+
+type encodePlanNumericCodecTextFloat64Valuer struct{}
+
+func (encodePlanNumericCodecTextFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if math.IsNaN(n.Float64) {
+ buf = append(buf, "NaN"...)
+ } else if math.IsInf(n.Float64, 1) {
+ buf = append(buf, "Infinity"...)
+ } else if math.IsInf(n.Float64, -1) {
+ buf = append(buf, "-Infinity"...)
+ } else {
+ buf = append(buf, strconv.FormatFloat(n.Float64, 'f', -1, 64)...)
+ }
+ return buf, nil
+}
+
+type encodePlanNumericCodecTextInt64Valuer struct{}
+
+func (encodePlanNumericCodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, strconv.FormatInt(n.Int64, 10)...)
+ return buf, nil
+}
+
+func encodeNumericText(n Numeric, buf []byte) (newBuf []byte, err error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.NaN {
+ buf = append(buf, "NaN"...)
+ return buf, nil
+ } else if n.InfinityModifier == Infinity {
+ buf = append(buf, "Infinity"...)
+ return buf, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ buf = append(buf, "-Infinity"...)
+ return buf, nil
+ }
+
+ buf = append(buf, n.numberTextBytes()...)
+
+ return buf, nil
+}
+
+func (NumericCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case NumericScanner:
+ return scanPlanBinaryNumericToNumericScanner{}
+ case Float64Scanner:
+ return scanPlanBinaryNumericToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryNumericToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryNumericToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case NumericScanner:
+ return scanPlanTextAnyToNumericScanner{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryNumericToNumericScanner struct{}
+
+func (scanPlanBinaryNumericToNumericScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NumericScanner)
+
+ if src == nil {
+ return scanner.ScanNumeric(Numeric{})
+ }
+
+ if len(src) < 8 {
+ return fmt.Errorf("numeric incomplete %v", src)
+ }
+
+ rp := 0
+ ndigits := binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ weight := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ sign := binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ dscale := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if sign == pgNumericNaNSign {
+ return scanner.ScanNumeric(Numeric{NaN: true, Valid: true})
+ } else if sign == pgNumericPosInfSign {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: Infinity, Valid: true})
+ } else if sign == pgNumericNegInfSign {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: NegativeInfinity, Valid: true})
+ }
+
+ if ndigits == 0 {
+ return scanner.ScanNumeric(Numeric{Int: big.NewInt(0), Valid: true})
+ }
+
+ if len(src[rp:]) < int(ndigits)*2 {
+ return fmt.Errorf("numeric incomplete %v", src)
+ }
+
+ accum := &big.Int{}
+
+ for i := 0; i < int(ndigits+3)/4; i++ {
+ int64accum, bytesRead, digitsRead := nbaseDigitsToInt64(src[rp:])
+ rp += bytesRead
+
+ if i > 0 {
+ var mul *big.Int
+ switch digitsRead {
+ case 1:
+ mul = bigNBase
+ case 2:
+ mul = bigNBaseX2
+ case 3:
+ mul = bigNBaseX3
+ case 4:
+ mul = bigNBaseX4
+ default:
+ return fmt.Errorf("invalid digitsRead: %d (this can't happen)", digitsRead)
+ }
+ accum.Mul(accum, mul)
+ }
+
+ accum.Add(accum, big.NewInt(int64accum))
+ }
+
+ exp := (int32(weight) - int32(ndigits) + 1) * 4
+
+ if dscale > 0 {
+ fracNBaseDigits := int16(int32(ndigits) - int32(weight) - 1)
+ fracDecimalDigits := fracNBaseDigits * 4
+
+ if dscale > fracDecimalDigits {
+ multCount := int(dscale - fracDecimalDigits)
+ for i := 0; i < multCount; i++ {
+ accum.Mul(accum, big10)
+ exp--
+ }
+ } else if dscale < fracDecimalDigits {
+ divCount := int(fracDecimalDigits - dscale)
+ for i := 0; i < divCount; i++ {
+ accum.Div(accum, big10)
+ exp++
+ }
+ }
+ }
+
+ reduced := &big.Int{}
+ remainder := &big.Int{}
+ if exp >= 0 {
+ for {
+ reduced.DivMod(accum, big10, remainder)
+ if remainder.Cmp(big0) != 0 {
+ break
+ }
+ accum.Set(reduced)
+ exp++
+ }
+ }
+
+ if sign != 0 {
+ accum.Neg(accum)
+ }
+
+ return scanner.ScanNumeric(Numeric{Int: accum, Exp: exp, Valid: true})
+}
+
+type scanPlanBinaryNumericToFloat64Scanner struct{}
+
+func (scanPlanBinaryNumericToFloat64Scanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(Float64Scanner)
+
+ if src == nil {
+ return scanner.ScanFloat64(Float8{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ f8, err := n.Float64Value()
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanFloat64(f8)
+}
+
+type scanPlanBinaryNumericToInt64Scanner struct{}
+
+func (scanPlanBinaryNumericToInt64Scanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(Int64Scanner)
+
+ if src == nil {
+ return scanner.ScanInt64(Int8{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ bigInt, err := n.toBigInt()
+ if err != nil {
+ return err
+ }
+
+ if !bigInt.IsInt64() {
+ return fmt.Errorf("%v is out of range for int64", bigInt)
+ }
+
+ return scanner.ScanInt64(Int8{Int64: bigInt.Int64(), Valid: true})
+}
+
+type scanPlanBinaryNumericToTextScanner struct{}
+
+func (scanPlanBinaryNumericToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ sbuf, err := encodeNumericText(n, nil)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanText(Text{String: string(sbuf), Valid: true})
+}
+
+type scanPlanTextAnyToNumericScanner struct{}
+
+func (scanPlanTextAnyToNumericScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NumericScanner)
+
+ if src == nil {
+ return scanner.ScanNumeric(Numeric{})
+ }
+
+ if string(src) == "NaN" {
+ return scanner.ScanNumeric(Numeric{NaN: true, Valid: true})
+ } else if string(src) == "Infinity" {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: Infinity, Valid: true})
+ } else if string(src) == "-Infinity" {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: NegativeInfinity, Valid: true})
+ }
+
+ num, exp, err := parseNumericString(string(src))
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanNumeric(Numeric{Int: num, Exp: exp, Valid: true})
+}
+
+func (c NumericCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if format == TextFormatCode {
+ return string(src), nil
+ }
+
+ var n Numeric
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+
+ buf, err := m.Encode(oid, TextFormatCode, n, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), nil
+}
+
+func (c NumericCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n Numeric
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
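
A minimal sketch of how the Numeric representation above composes (using only the pgtype API shown in this file): the stored value is Int scaled by 10^Exp, so 123.45 is Int=12345 with Exp=-2, while NULL and NaN are carried by the Valid and NaN flags rather than by Int:

package main

import (
	"fmt"
	"math/big"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// 123.45 == 12345 * 10^-2
	n := pgtype.Numeric{Int: big.NewInt(12345), Exp: -2, Valid: true}

	f8, err := n.Float64Value() // formats as "12345e-2" and parses it back as a float64
	if err != nil {
		panic(err)
	}
	fmt.Println(f8.Float64) // 123.45

	// NULL is simply the zero value: Valid is false and Int is nil.
	var null pgtype.Numeric
	fmt.Println(null.Valid) // false
}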
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/path.go b/vendor/github.com/jackc/pgx/v5/pgtype/path.go
new file mode 100644
index 0000000..73e0ec5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/path.go
@@ -0,0 +1,272 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type PathScanner interface {
+ ScanPath(v Path) error
+}
+
+type PathValuer interface {
+ PathValue() (Path, error)
+}
+
+type Path struct {
+ P []Vec2
+ Closed bool
+ Valid bool
+}
+
+func (path *Path) ScanPath(v Path) error {
+ *path = v
+ return nil
+}
+
+func (path Path) PathValue() (Path, error) {
+ return path, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (path *Path) Scan(src any) error {
+ if src == nil {
+ *path = Path{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPathScanner{}.Scan([]byte(src), path)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (path Path) Value() (driver.Value, error) {
+ if !path.Valid {
+ return nil, nil
+ }
+
+ buf, err := PathCodec{}.PlanEncode(nil, 0, TextFormatCode, path).Encode(path, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), err
+}
+
+type PathCodec struct{}
+
+func (PathCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PathCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PathCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PathValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPathCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPathCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPathCodecBinary struct{}
+
+func (encodePlanPathCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ path, err := value.(PathValuer).PathValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !path.Valid {
+ return nil, nil
+ }
+
+ var closeByte byte
+ if path.Closed {
+ closeByte = 1
+ }
+ buf = append(buf, closeByte)
+
+ buf = pgio.AppendInt32(buf, int32(len(path.P)))
+
+ for _, p := range path.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+type encodePlanPathCodecText struct{}
+
+func (encodePlanPathCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ path, err := value.(PathValuer).PathValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !path.Valid {
+ return nil, nil
+ }
+
+ var startByte, endByte byte
+ if path.Closed {
+ startByte = '('
+ endByte = ')'
+ } else {
+ startByte = '['
+ endByte = ']'
+ }
+ buf = append(buf, startByte)
+
+ for i, p := range path.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(p.X, 'f', -1, 64),
+ strconv.FormatFloat(p.Y, 'f', -1, 64),
+ )...)
+ }
+
+ buf = append(buf, endByte)
+
+ return buf, nil
+}
+
+func (PathCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PathScanner:
+ return scanPlanBinaryPathToPathScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PathScanner:
+ return scanPlanTextAnyToPathScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryPathToPathScanner struct{}
+
+func (scanPlanBinaryPathToPathScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PathScanner)
+
+ if src == nil {
+ return scanner.ScanPath(Path{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == 1
+ pointCount := int(binary.BigEndian.Uint32(src[1:]))
+
+ rp := 5
+
+ if 5+pointCount*16 != len(src) {
+ return fmt.Errorf("invalid length for Path with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ return scanner.ScanPath(Path{
+ P: points,
+ Closed: closed,
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPathScanner struct{}
+
+func (scanPlanTextAnyToPathScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PathScanner)
+
+ if src == nil {
+ return scanner.ScanPath(Path{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == '('
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ return scanner.ScanPath(Path{P: points, Closed: closed, Valid: true})
+}
+
+func (c PathCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PathCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var path Path
+ err := codecScan(c, m, oid, format, src, &path)
+ if err != nil {
+ return nil, err
+ }
+ return path, nil
+}
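
A short sketch of the text form produced by the Path codec above (Vec2 is the point type used by Path.P): open paths render with square brackets, closed paths with parentheses, and Value() goes through the text encode plan defined in this file:

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	open := pgtype.Path{
		P:      []pgtype.Vec2{{X: 1, Y: 2}, {X: 3.5, Y: 4}},
		Closed: false, // open path -> '[' ... ']'
		Valid:  true,
	}

	v, err := open.Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // [(1,2),(3.5,4)]
}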
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
new file mode 100644
index 0000000..22cf66d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
@@ -0,0 +1,2040 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "reflect"
+ "time"
+)
+
+// PostgreSQL oids for common types
+const (
+ BoolOID = 16
+ ByteaOID = 17
+ QCharOID = 18
+ NameOID = 19
+ Int8OID = 20
+ Int2OID = 21
+ Int4OID = 23
+ TextOID = 25
+ OIDOID = 26
+ TIDOID = 27
+ XIDOID = 28
+ CIDOID = 29
+ JSONOID = 114
+ XMLOID = 142
+ XMLArrayOID = 143
+ JSONArrayOID = 199
+ XID8ArrayOID = 271
+ PointOID = 600
+ LsegOID = 601
+ PathOID = 602
+ BoxOID = 603
+ PolygonOID = 604
+ LineOID = 628
+ LineArrayOID = 629
+ CIDROID = 650
+ CIDRArrayOID = 651
+ Float4OID = 700
+ Float8OID = 701
+ CircleOID = 718
+ CircleArrayOID = 719
+ UnknownOID = 705
+ Macaddr8OID = 774
+ MacaddrOID = 829
+ InetOID = 869
+ BoolArrayOID = 1000
+ QCharArrayOID = 1002
+ NameArrayOID = 1003
+ Int2ArrayOID = 1005
+ Int4ArrayOID = 1007
+ TextArrayOID = 1009
+ TIDArrayOID = 1010
+ ByteaArrayOID = 1001
+ XIDArrayOID = 1011
+ CIDArrayOID = 1012
+ BPCharArrayOID = 1014
+ VarcharArrayOID = 1015
+ Int8ArrayOID = 1016
+ PointArrayOID = 1017
+ LsegArrayOID = 1018
+ PathArrayOID = 1019
+ BoxArrayOID = 1020
+ Float4ArrayOID = 1021
+ Float8ArrayOID = 1022
+ PolygonArrayOID = 1027
+ OIDArrayOID = 1028
+ ACLItemOID = 1033
+ ACLItemArrayOID = 1034
+ MacaddrArrayOID = 1040
+ InetArrayOID = 1041
+ BPCharOID = 1042
+ VarcharOID = 1043
+ DateOID = 1082
+ TimeOID = 1083
+ TimestampOID = 1114
+ TimestampArrayOID = 1115
+ DateArrayOID = 1182
+ TimeArrayOID = 1183
+ TimestamptzOID = 1184
+ TimestamptzArrayOID = 1185
+ IntervalOID = 1186
+ IntervalArrayOID = 1187
+ NumericArrayOID = 1231
+ TimetzOID = 1266
+ TimetzArrayOID = 1270
+ BitOID = 1560
+ BitArrayOID = 1561
+ VarbitOID = 1562
+ VarbitArrayOID = 1563
+ NumericOID = 1700
+ RecordOID = 2249
+ RecordArrayOID = 2287
+ UUIDOID = 2950
+ UUIDArrayOID = 2951
+ JSONBOID = 3802
+ JSONBArrayOID = 3807
+ DaterangeOID = 3912
+ DaterangeArrayOID = 3913
+ Int4rangeOID = 3904
+ Int4rangeArrayOID = 3905
+ NumrangeOID = 3906
+ NumrangeArrayOID = 3907
+ TsrangeOID = 3908
+ TsrangeArrayOID = 3909
+ TstzrangeOID = 3910
+ TstzrangeArrayOID = 3911
+ Int8rangeOID = 3926
+ Int8rangeArrayOID = 3927
+ JSONPathOID = 4072
+ JSONPathArrayOID = 4073
+ Int4multirangeOID = 4451
+ NummultirangeOID = 4532
+ TsmultirangeOID = 4533
+ TstzmultirangeOID = 4534
+ DatemultirangeOID = 4535
+ Int8multirangeOID = 4536
+ XID8OID = 5069
+ Int4multirangeArrayOID = 6150
+ NummultirangeArrayOID = 6151
+ TsmultirangeArrayOID = 6152
+ TstzmultirangeArrayOID = 6153
+ DatemultirangeArrayOID = 6155
+ Int8multirangeArrayOID = 6157
+)
+
+type InfinityModifier int8
+
+const (
+ Infinity InfinityModifier = 1
+ Finite InfinityModifier = 0
+ NegativeInfinity InfinityModifier = -Infinity
+)
+
+func (im InfinityModifier) String() string {
+ switch im {
+ case Finite:
+ return "finite"
+ case Infinity:
+ return "infinity"
+ case NegativeInfinity:
+ return "-infinity"
+ default:
+ return "invalid"
+ }
+}
+
+// PostgreSQL format codes
+const (
+ TextFormatCode = 0
+ BinaryFormatCode = 1
+)
+
+// A Codec converts between Go and PostgreSQL values. A Codec must not be mutated after it is registered with a Map.
+type Codec interface {
+ // FormatSupported returns true if the format is supported.
+ FormatSupported(int16) bool
+
+ // PreferredFormat returns the preferred format.
+ PreferredFormat() int16
+
+ // PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+ // found then nil is returned.
+ PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan
+
+ // PlanScan returns a ScanPlan for scanning a PostgreSQL value into a destination with the same type as target. If
+ // no plan can be found then nil is returned.
+ PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan
+
+ // DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
+ DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error)
+
+ // DecodeValue returns src decoded into its default format.
+ DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error)
+}
+
+type nullAssignmentError struct {
+ dst any
+}
+
+func (e *nullAssignmentError) Error() string {
+ return fmt.Sprintf("cannot assign NULL to %T", e.dst)
+}
+
+// Type represents a PostgreSQL data type. It must not be mutated after it is registered with a Map.
+type Type struct {
+ Codec Codec
+ Name string
+ OID uint32
+}
+
+// Map is the mapping between PostgreSQL server types and Go type handling logic. It can encode values for
+// transmission to a PostgreSQL server and scan received values.
+type Map struct {
+ oidToType map[uint32]*Type
+ nameToType map[string]*Type
+ reflectTypeToName map[reflect.Type]string
+ oidToFormatCode map[uint32]int16
+
+ reflectTypeToType map[reflect.Type]*Type
+
+ memoizedEncodePlans map[uint32]map[reflect.Type][2]EncodePlan
+
+ // TryWrapEncodePlanFuncs is a slice of functions that will wrap a value that cannot be encoded by the Codec. Every
+ // time a wrapper is found the PlanEncode method will be recursively called with the new value. This allows several layers of wrappers
+ // to be built up. There are default functions placed in this slice by NewMap(). In most cases these functions
+ // should run last. i.e. Additional functions should typically be prepended not appended.
+ TryWrapEncodePlanFuncs []TryWrapEncodePlanFunc
+
+ // TryWrapScanPlanFuncs is a slice of functions that will wrap a target that cannot be scanned into by the Codec. Every
+ // time a wrapper is found the PlanScan method will be recursively called with the new target. This allows several layers of wrappers
+ // to be built up. There are default functions placed in this slice by NewMap(). In most cases these functions
+ // should run last. i.e. Additional functions should typically be prepended not appended.
+ TryWrapScanPlanFuncs []TryWrapScanPlanFunc
+}
+
+// Copy returns a new Map containing the same registered types.
+func (m *Map) Copy() *Map {
+ newMap := NewMap()
+ for _, type_ := range m.oidToType {
+ newMap.RegisterType(type_)
+ }
+ return newMap
+}
+
+func NewMap() *Map {
+ defaultMapInitOnce.Do(initDefaultMap)
+
+ return &Map{
+ oidToType: make(map[uint32]*Type),
+ nameToType: make(map[string]*Type),
+ reflectTypeToName: make(map[reflect.Type]string),
+ oidToFormatCode: make(map[uint32]int16),
+
+ memoizedEncodePlans: make(map[uint32]map[reflect.Type][2]EncodePlan),
+
+ TryWrapEncodePlanFuncs: []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapBuiltinTypeEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ TryWrapStructEncodePlan,
+ TryWrapSliceEncodePlan,
+ TryWrapMultiDimSliceEncodePlan,
+ TryWrapArrayEncodePlan,
+ },
+
+ TryWrapScanPlanFuncs: []TryWrapScanPlanFunc{
+ TryPointerPointerScanPlan,
+ TryWrapBuiltinTypeScanPlan,
+ TryFindUnderlyingTypeScanPlan,
+ TryWrapStructScanPlan,
+ TryWrapPtrSliceScanPlan,
+ TryWrapPtrMultiDimSliceScanPlan,
+ TryWrapPtrArrayScanPlan,
+ },
+ }
+}
+
+// RegisterTypes registers multiple data types in the sequence they are provided.
+func (m *Map) RegisterTypes(types []*Type) {
+ for _, t := range types {
+ m.RegisterType(t)
+ }
+}
+
+// RegisterType registers a data type with the Map. t must not be mutated after it is registered.
+func (m *Map) RegisterType(t *Type) {
+ m.oidToType[t.OID] = t
+ m.nameToType[t.Name] = t
+ m.oidToFormatCode[t.OID] = t.Codec.PreferredFormat()
+
+ // Invalidated by type registration
+ m.reflectTypeToType = nil
+ for k := range m.memoizedEncodePlans {
+ delete(m.memoizedEncodePlans, k)
+ }
+}
+
+// RegisterDefaultPgType registers a mapping of a Go type to a PostgreSQL type name. Typically the data type to be
+// encoded or decoded is determined by the PostgreSQL OID. But if the OID of a value to be encoded or decoded is
+// unknown, this additional mapping will be used by TypeForValue to determine a suitable data type.
+func (m *Map) RegisterDefaultPgType(value any, name string) {
+ m.reflectTypeToName[reflect.TypeOf(value)] = name
+
+ // Invalidated by type registration
+ m.reflectTypeToType = nil
+ for k := range m.memoizedEncodePlans {
+ delete(m.memoizedEncodePlans, k)
+ }
+}
+
+// TypeForOID returns the Type registered for the given OID. The returned Type must not be mutated.
+func (m *Map) TypeForOID(oid uint32) (*Type, bool) {
+ if dt, ok := m.oidToType[oid]; ok {
+ return dt, true
+ }
+
+ dt, ok := defaultMap.oidToType[oid]
+ return dt, ok
+}
+
+// TypeForName returns the Type registered for the given name. The returned Type must not be mutated.
+func (m *Map) TypeForName(name string) (*Type, bool) {
+ if dt, ok := m.nameToType[name]; ok {
+ return dt, true
+ }
+ dt, ok := defaultMap.nameToType[name]
+ return dt, ok
+}
+
+func (m *Map) buildReflectTypeToType() {
+ m.reflectTypeToType = make(map[reflect.Type]*Type)
+
+ for reflectType, name := range m.reflectTypeToName {
+ if dt, ok := m.TypeForName(name); ok {
+ m.reflectTypeToType[reflectType] = dt
+ }
+ }
+}
+
+// TypeForValue finds a data type suitable for v. Use RegisterType to register types that can encode and decode
+// themselves. Use RegisterDefaultPgType to register types that can be handled by a registered data type. The returned Type
+// must not be mutated.
+func (m *Map) TypeForValue(v any) (*Type, bool) {
+ if m.reflectTypeToType == nil {
+ m.buildReflectTypeToType()
+ }
+
+ if dt, ok := m.reflectTypeToType[reflect.TypeOf(v)]; ok {
+ return dt, true
+ }
+
+ dt, ok := defaultMap.reflectTypeToType[reflect.TypeOf(v)]
+ return dt, ok
+}
+
+// FormatCodeForOID returns the preferred format code for type oid. If the type is not registered it returns the text
+// format code.
+func (m *Map) FormatCodeForOID(oid uint32) int16 {
+ if fc, ok := m.oidToFormatCode[oid]; ok {
+ return fc
+ }
+
+ if fc, ok := defaultMap.oidToFormatCode[oid]; ok {
+ return fc
+ }
+
+ return TextFormatCode
+}
+
+// EncodePlan is a precompiled plan to encode a particular type into a particular OID and format.
+type EncodePlan interface {
+ // Encode appends the encoded bytes of value to buf. If value is the SQL value NULL then append nothing and return
+ // (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data
+ // written.
+ Encode(value any, buf []byte) (newBuf []byte, err error)
+}
+
+// ScanPlan is a precompiled plan to scan into a type of destination.
+type ScanPlan interface {
+ // Scan scans src into target. src is only valid during the call to Scan. The ScanPlan must not retain a reference to
+ // src.
+ Scan(src []byte, target any) error
+}
+
+type scanPlanCodecSQLScanner struct {
+ c Codec
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *scanPlanCodecSQLScanner) Scan(src []byte, dst any) error {
+ value, err := plan.c.DecodeDatabaseSQLValue(plan.m, plan.oid, plan.formatCode, src)
+ if err != nil {
+ return err
+ }
+
+ scanner := dst.(sql.Scanner)
+ return scanner.Scan(value)
+}
+
+type scanPlanSQLScanner struct {
+ formatCode int16
+}
+
+func (plan *scanPlanSQLScanner) Scan(src []byte, dst any) error {
+ scanner := dst.(sql.Scanner)
+
+ if src == nil {
+ // This is necessary because interface value []byte:nil does not equal nil:nil for the binary format path and the
+ // text format path would be converted to empty string.
+ return scanner.Scan(nil)
+ } else if plan.formatCode == BinaryFormatCode {
+ return scanner.Scan(src)
+ } else {
+ return scanner.Scan(string(src))
+ }
+}
+
+type scanPlanString struct{}
+
+func (scanPlanString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = string(src)
+ return nil
+}
+
+type scanPlanAnyTextToBytes struct{}
+
+func (scanPlanAnyTextToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanFail struct {
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *scanPlanFail) Scan(src []byte, dst any) error {
+	// If src is NULL it might be possible to scan into dst even though the types are not compatible. While this
+ // may seem to be a contrived case it can occur when selecting NULL directly. PostgreSQL assigns it the type of text.
+ // It would be surprising to the caller to have to cast the NULL (e.g. `select null::int`). So try to figure out a
+ // compatible data type for dst and scan with that.
+ //
+ // See https://github.com/jackc/pgx/issues/1326
+ if src == nil {
+ // As a horrible hack try all types to find anything that can scan into dst.
+ for oid := range plan.m.oidToType {
+ // using planScan instead of Scan or PlanScan to avoid polluting the planned scan cache.
+ plan := plan.m.planScan(oid, plan.formatCode, dst, 0)
+ if _, ok := plan.(*scanPlanFail); !ok {
+ return plan.Scan(src, dst)
+ }
+ }
+ for oid := range defaultMap.oidToType {
+ if _, ok := plan.m.oidToType[oid]; !ok {
+ plan := plan.m.planScan(oid, plan.formatCode, dst, 0)
+ if _, ok := plan.(*scanPlanFail); !ok {
+ return plan.Scan(src, dst)
+ }
+ }
+ }
+ }
+
+ var format string
+ switch plan.formatCode {
+ case TextFormatCode:
+ format = "text"
+ case BinaryFormatCode:
+ format = "binary"
+ default:
+ format = fmt.Sprintf("unknown %d", plan.formatCode)
+ }
+
+ var dataTypeName string
+ if t, ok := plan.m.TypeForOID(plan.oid); ok {
+ dataTypeName = t.Name
+ } else {
+ dataTypeName = "unknown type"
+ }
+
+ return fmt.Errorf("cannot scan %s (OID %d) in %v format into %T", dataTypeName, plan.oid, format, dst)
+}
+
+// TryWrapScanPlanFunc is a function that tries to create a wrapper plan for target. If successful it returns a plan
+// that will convert the target passed to Scan and then call the next plan. nextTarget is target as it will be converted
+// by plan. It must be used to find another suitable ScanPlan. When it is found SetNext must be called on plan for it
+// to be usable. ok indicates if a suitable wrapper was found.
+type TryWrapScanPlanFunc func(target any) (plan WrappedScanPlanNextSetter, nextTarget any, ok bool)
+
+type pointerPointerScanPlan struct {
+ dstType reflect.Type
+ next ScanPlan
+}
+
+func (plan *pointerPointerScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *pointerPointerScanPlan) Scan(src []byte, dst any) error {
+ el := reflect.ValueOf(dst).Elem()
+ if src == nil {
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+
+ el.Set(reflect.New(el.Type().Elem()))
+ return plan.next.Scan(src, el.Interface())
+}
+
+// TryPointerPointerScanPlan handles a pointer to a pointer by setting the target to nil for SQL NULL and allocating and
+// scanning for non-NULL.
+func TryPointerPointerScanPlan(target any) (plan WrappedScanPlanNextSetter, nextTarget any, ok bool) {
+ if dstValue := reflect.ValueOf(target); dstValue.Kind() == reflect.Ptr {
+ elemValue := dstValue.Elem()
+ if elemValue.Kind() == reflect.Ptr {
+ plan = &pointerPointerScanPlan{dstType: dstValue.Type()}
+ return plan, reflect.Zero(elemValue.Type()).Interface(), true
+ }
+ }
+
+ return nil, nil, false
+}
+
+// SkipUnderlyingTypePlanner prevents PlanScan and PlanDecode from trying to use the underlying type.
+type SkipUnderlyingTypePlanner interface {
+ SkipUnderlyingTypePlan()
+}
+
+var elemKindToPointerTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
+ reflect.Int: reflect.TypeOf(new(int)),
+ reflect.Int8: reflect.TypeOf(new(int8)),
+ reflect.Int16: reflect.TypeOf(new(int16)),
+ reflect.Int32: reflect.TypeOf(new(int32)),
+ reflect.Int64: reflect.TypeOf(new(int64)),
+ reflect.Uint: reflect.TypeOf(new(uint)),
+ reflect.Uint8: reflect.TypeOf(new(uint8)),
+ reflect.Uint16: reflect.TypeOf(new(uint16)),
+ reflect.Uint32: reflect.TypeOf(new(uint32)),
+ reflect.Uint64: reflect.TypeOf(new(uint64)),
+ reflect.Float32: reflect.TypeOf(new(float32)),
+ reflect.Float64: reflect.TypeOf(new(float64)),
+ reflect.String: reflect.TypeOf(new(string)),
+ reflect.Bool: reflect.TypeOf(new(bool)),
+}
+
+type underlyingTypeScanPlan struct {
+ dstType reflect.Type
+ nextDstType reflect.Type
+ next ScanPlan
+}
+
+func (plan *underlyingTypeScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *underlyingTypeScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, reflect.ValueOf(dst).Convert(plan.nextDstType).Interface())
+}
+
+// TryFindUnderlyingTypeScanPlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
+// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
+func TryFindUnderlyingTypeScanPlan(dst any) (plan WrappedScanPlanNextSetter, nextDst any, ok bool) {
+ if _, ok := dst.(SkipUnderlyingTypePlanner); ok {
+ return nil, nil, false
+ }
+
+ dstValue := reflect.ValueOf(dst)
+
+ if dstValue.Kind() == reflect.Ptr {
+ var elemValue reflect.Value
+ if dstValue.IsNil() {
+ elemValue = reflect.New(dstValue.Type().Elem()).Elem()
+ } else {
+ elemValue = dstValue.Elem()
+ }
+ nextDstType := elemKindToPointerTypes[elemValue.Kind()]
+ if nextDstType == nil {
+ if elemValue.Kind() == reflect.Slice {
+ if elemValue.Type().Elem().Kind() == reflect.Uint8 {
+ var v *[]byte
+ nextDstType = reflect.TypeOf(v)
+ }
+ }
+
+ // Get underlying type of any array.
+ // https://github.com/jackc/pgx/issues/2107
+ if elemValue.Kind() == reflect.Array {
+ nextDstType = reflect.PointerTo(reflect.ArrayOf(elemValue.Len(), elemValue.Type().Elem()))
+ }
+ }
+
+ if nextDstType != nil && dstValue.Type() != nextDstType && dstValue.CanConvert(nextDstType) {
+ return &underlyingTypeScanPlan{dstType: dstValue.Type(), nextDstType: nextDstType}, dstValue.Convert(nextDstType).Interface(), true
+ }
+ }
+
+ return nil, nil, false
+}
+
+type WrappedScanPlanNextSetter interface {
+ SetNext(ScanPlan)
+ ScanPlan
+}
+
+// TryWrapBuiltinTypeScanPlan tries to wrap a builtin type with a wrapper that provides additional methods. e.g. If
+// value was of type int32 then a wrapper plan would be returned that converts target to a value that implements
+// Int64Scanner.
+func TryWrapBuiltinTypeScanPlan(target any) (plan WrappedScanPlanNextSetter, nextDst any, ok bool) {
+ switch target := target.(type) {
+ case *int8:
+ return &wrapInt8ScanPlan{}, (*int8Wrapper)(target), true
+ case *int16:
+ return &wrapInt16ScanPlan{}, (*int16Wrapper)(target), true
+ case *int32:
+ return &wrapInt32ScanPlan{}, (*int32Wrapper)(target), true
+ case *int64:
+ return &wrapInt64ScanPlan{}, (*int64Wrapper)(target), true
+ case *int:
+ return &wrapIntScanPlan{}, (*intWrapper)(target), true
+ case *uint8:
+ return &wrapUint8ScanPlan{}, (*uint8Wrapper)(target), true
+ case *uint16:
+ return &wrapUint16ScanPlan{}, (*uint16Wrapper)(target), true
+ case *uint32:
+ return &wrapUint32ScanPlan{}, (*uint32Wrapper)(target), true
+ case *uint64:
+ return &wrapUint64ScanPlan{}, (*uint64Wrapper)(target), true
+ case *uint:
+ return &wrapUintScanPlan{}, (*uintWrapper)(target), true
+ case *float32:
+ return &wrapFloat32ScanPlan{}, (*float32Wrapper)(target), true
+ case *float64:
+ return &wrapFloat64ScanPlan{}, (*float64Wrapper)(target), true
+ case *string:
+ return &wrapStringScanPlan{}, (*stringWrapper)(target), true
+ case *time.Time:
+ return &wrapTimeScanPlan{}, (*timeWrapper)(target), true
+ case *time.Duration:
+ return &wrapDurationScanPlan{}, (*durationWrapper)(target), true
+ case *net.IPNet:
+ return &wrapNetIPNetScanPlan{}, (*netIPNetWrapper)(target), true
+ case *net.IP:
+ return &wrapNetIPScanPlan{}, (*netIPWrapper)(target), true
+ case *netip.Prefix:
+ return &wrapNetipPrefixScanPlan{}, (*netipPrefixWrapper)(target), true
+ case *netip.Addr:
+ return &wrapNetipAddrScanPlan{}, (*netipAddrWrapper)(target), true
+ case *map[string]*string:
+ return &wrapMapStringToPointerStringScanPlan{}, (*mapStringToPointerStringWrapper)(target), true
+ case *map[string]string:
+ return &wrapMapStringToStringScanPlan{}, (*mapStringToStringWrapper)(target), true
+ case *[16]byte:
+ return &wrapByte16ScanPlan{}, (*byte16Wrapper)(target), true
+ case *[]byte:
+ return &wrapByteSliceScanPlan{}, (*byteSliceWrapper)(target), true
+ }
+
+ return nil, nil, false
+}
+
+type wrapInt8ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt8ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt8ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int8Wrapper)(dst.(*int8)))
+}
+
+type wrapInt16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int16Wrapper)(dst.(*int16)))
+}
+
+type wrapInt32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int32Wrapper)(dst.(*int32)))
+}
+
+type wrapInt64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int64Wrapper)(dst.(*int64)))
+}
+
+type wrapIntScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapIntScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapIntScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*intWrapper)(dst.(*int)))
+}
+
+type wrapUint8ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint8ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint8ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint8Wrapper)(dst.(*uint8)))
+}
+
+type wrapUint16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint16Wrapper)(dst.(*uint16)))
+}
+
+type wrapUint32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint32Wrapper)(dst.(*uint32)))
+}
+
+type wrapUint64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint64Wrapper)(dst.(*uint64)))
+}
+
+type wrapUintScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUintScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUintScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uintWrapper)(dst.(*uint)))
+}
+
+type wrapFloat32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapFloat32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapFloat32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*float32Wrapper)(dst.(*float32)))
+}
+
+type wrapFloat64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapFloat64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapFloat64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*float64Wrapper)(dst.(*float64)))
+}
+
+type wrapStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*stringWrapper)(dst.(*string)))
+}
+
+type wrapTimeScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapTimeScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapTimeScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*timeWrapper)(dst.(*time.Time)))
+}
+
+type wrapDurationScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapDurationScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapDurationScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*durationWrapper)(dst.(*time.Duration)))
+}
+
+type wrapNetIPNetScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetIPNetScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetIPNetScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netIPNetWrapper)(dst.(*net.IPNet)))
+}
+
+type wrapNetIPScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetIPScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetIPScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netIPWrapper)(dst.(*net.IP)))
+}
+
+type wrapNetipPrefixScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetipPrefixScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetipPrefixScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netipPrefixWrapper)(dst.(*netip.Prefix)))
+}
+
+type wrapNetipAddrScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetipAddrScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetipAddrScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netipAddrWrapper)(dst.(*netip.Addr)))
+}
+
+type wrapMapStringToPointerStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapMapStringToPointerStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapMapStringToPointerStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*mapStringToPointerStringWrapper)(dst.(*map[string]*string)))
+}
+
+type wrapMapStringToStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapMapStringToStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapMapStringToStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*mapStringToStringWrapper)(dst.(*map[string]string)))
+}
+
+type wrapByte16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapByte16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapByte16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*byte16Wrapper)(dst.(*[16]byte)))
+}
+
+type wrapByteSliceScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapByteSliceScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapByteSliceScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*byteSliceWrapper)(dst.(*[]byte)))
+}
+
+type pointerEmptyInterfaceScanPlan struct {
+ codec Codec
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *pointerEmptyInterfaceScanPlan) Scan(src []byte, dst any) error {
+ value, err := plan.codec.DecodeValue(plan.m, plan.oid, plan.formatCode, src)
+ if err != nil {
+ return err
+ }
+
+ ptrAny := dst.(*any)
+ *ptrAny = value
+
+ return nil
+}
+
+// TryWrapStructScanPlan tries to wrap a pointer to a struct with a wrapper that implements CompositeIndexGetter.
+func TryWrapStructScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ var targetElemValue reflect.Value
+ if targetValue.IsNil() {
+ targetElemValue = reflect.Zero(targetValue.Type().Elem())
+ } else {
+ targetElemValue = targetValue.Elem()
+ }
+ targetElemType := targetElemValue.Type()
+
+ if targetElemType.Kind() == reflect.Struct {
+ exportedFields := getExportedFieldValues(targetElemValue)
+ if len(exportedFields) == 0 {
+ return nil, nil, false
+ }
+
+ w := ptrStructWrapper{
+ s: target,
+ exportedFields: exportedFields,
+ }
+ return &wrapAnyPtrStructScanPlan{}, &w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapAnyPtrStructScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapAnyPtrStructScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapAnyPtrStructScanPlan) Scan(src []byte, target any) error {
+ w := ptrStructWrapper{
+ s: target,
+ exportedFields: getExportedFieldValues(reflect.ValueOf(target).Elem()),
+ }
+
+ return plan.next.Scan(src, &w)
+}
+
+// TryWrapPtrSliceScanPlan tries to wrap a pointer to a single dimension slice.
+func TryWrapPtrSliceScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ // Avoid using reflect path for common types.
+ switch target := target.(type) {
+ case *[]int16:
+ return &wrapPtrSliceScanPlan[int16]{}, (*FlatArray[int16])(target), true
+ case *[]int32:
+ return &wrapPtrSliceScanPlan[int32]{}, (*FlatArray[int32])(target), true
+ case *[]int64:
+ return &wrapPtrSliceScanPlan[int64]{}, (*FlatArray[int64])(target), true
+ case *[]float32:
+ return &wrapPtrSliceScanPlan[float32]{}, (*FlatArray[float32])(target), true
+ case *[]float64:
+ return &wrapPtrSliceScanPlan[float64]{}, (*FlatArray[float64])(target), true
+ case *[]string:
+ return &wrapPtrSliceScanPlan[string]{}, (*FlatArray[string])(target), true
+ case *[]time.Time:
+ return &wrapPtrSliceScanPlan[time.Time]{}, (*FlatArray[time.Time])(target), true
+ }
+
+ targetType := reflect.TypeOf(target)
+ if targetType.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemType := targetType.Elem()
+
+ if targetElemType.Kind() == reflect.Slice {
+ slice := reflect.New(targetElemType).Elem()
+ return &wrapPtrSliceReflectScanPlan{}, &anySliceArrayReflect{slice: slice}, true
+ }
+ return nil, nil, false
+}
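+
+// Illustrative sketch of what the fast path above enables (src is assumed to be
+// the wire-format int4[] data from a query result; names are unqualified because
+// this comment lives inside package pgtype):
+//
+//	m := NewMap()
+//	var ids []int32
+//	err := m.Scan(Int4ArrayOID, BinaryFormatCode, src, &ids)
+//
+// The *[]int32 destination is retyped as *FlatArray[int32] before the array
+// codec's scan plan runs, so the common element types avoid reflection.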
+
+type wrapPtrSliceScanPlan[T any] struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrSliceScanPlan[T]) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrSliceScanPlan[T]) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, (*FlatArray[T])(target.(*[]T)))
+}
+
+type wrapPtrSliceReflectScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrSliceReflectScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrSliceReflectScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anySliceArrayReflect{slice: reflect.ValueOf(target).Elem()})
+}
+
+// TryWrapPtrMultiDimSliceScanPlan tries to wrap a pointer to a multi-dimension slice.
+func TryWrapPtrMultiDimSliceScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemValue := targetValue.Elem()
+
+ if targetElemValue.Kind() == reflect.Slice {
+ elemElemKind := targetElemValue.Type().Elem().Kind()
+ if elemElemKind == reflect.Slice {
+ if !isRagged(targetElemValue) {
+ return &wrapPtrMultiDimSliceScanPlan{}, &anyMultiDimSliceArray{slice: targetValue.Elem()}, true
+ }
+ }
+ }
+
+ return nil, nil, false
+}
+
+type wrapPtrMultiDimSliceScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrMultiDimSliceScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrMultiDimSliceScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anyMultiDimSliceArray{slice: reflect.ValueOf(target).Elem()})
+}
+
+// TryWrapPtrArrayScanPlan tries to wrap a pointer to a single dimension array.
+func TryWrapPtrArrayScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemValue := targetValue.Elem()
+
+ if targetElemValue.Kind() == reflect.Array {
+ return &wrapPtrArrayReflectScanPlan{}, &anyArrayArrayReflect{array: targetElemValue}, true
+ }
+ return nil, nil, false
+}
+
+type wrapPtrArrayReflectScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrArrayReflectScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrArrayReflectScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anyArrayArrayReflect{array: reflect.ValueOf(target).Elem()})
+}
+
+// PlanScan prepares a plan to scan a value into target.
+func (m *Map) PlanScan(oid uint32, formatCode int16, target any) ScanPlan {
+ return m.planScan(oid, formatCode, target, 0)
+}
+
+func (m *Map) planScan(oid uint32, formatCode int16, target any, depth int) ScanPlan {
+ if depth > 8 {
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+ }
+
+ if target == nil {
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+ }
+
+ if _, ok := target.(*UndecodedBytes); ok {
+ return scanPlanAnyToUndecodedBytes{}
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ switch oid {
+ case TextOID, VarcharOID:
+ return scanPlanString{}
+ }
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanString{}
+ case *[]byte:
+ if oid != ByteaOID {
+ return scanPlanAnyTextToBytes{}
+ }
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ var dt *Type
+
+ if dataType, ok := m.TypeForOID(oid); ok {
+ dt = dataType
+ } else if dataType, ok := m.TypeForValue(target); ok {
+ dt = dataType
+ oid = dt.OID // Preserve assumed OID in case we are recursively called below.
+ }
+
+ if dt != nil {
+ if plan := dt.Codec.PlanScan(m, oid, formatCode, target); plan != nil {
+ return plan
+ }
+ }
+
+ // This needs to happen before trying m.TryWrapScanPlanFuncs. Otherwise, a sql.Scanner would not get called if it was
+ // defined on a type that could be unwrapped such as `type myString string`.
+ //
+ // https://github.com/jackc/pgtype/issues/197
+ if _, ok := target.(sql.Scanner); ok {
+ if dt == nil {
+ return &scanPlanSQLScanner{formatCode: formatCode}
+ } else {
+ return &scanPlanCodecSQLScanner{c: dt.Codec, m: m, oid: oid, formatCode: formatCode}
+ }
+ }
+
+ for _, f := range m.TryWrapScanPlanFuncs {
+ if wrapperPlan, nextDst, ok := f(target); ok {
+ if nextPlan := m.planScan(oid, formatCode, nextDst, depth+1); nextPlan != nil {
+ if _, failed := nextPlan.(*scanPlanFail); !failed {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+ }
+
+ if dt != nil {
+ if _, ok := target.(*any); ok {
+ return &pointerEmptyInterfaceScanPlan{codec: dt.Codec, m: m, oid: oid, formatCode: formatCode}
+ }
+ }
+
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+}
+
+func (m *Map) Scan(oid uint32, formatCode int16, src []byte, dst any) error {
+ if dst == nil {
+ return nil
+ }
+
+ plan := m.PlanScan(oid, formatCode, dst)
+ return plan.Scan(src, dst)
+}
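+
+// Illustrative use of PlanScan/Scan from calling code (a sketch; src is assumed to
+// be the binary wire representation of an int8 value, i.e. 8 big-endian bytes):
+//
+//	m := NewMap()
+//	var n int64
+//	err := m.Scan(Int8OID, BinaryFormatCode, src, &n)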
+
+var ErrScanTargetTypeChanged = errors.New("scan target type changed")
+
+func codecScan(codec Codec, m *Map, oid uint32, format int16, src []byte, dst any) error {
+ scanPlan := codec.PlanScan(m, oid, format, dst)
+ if scanPlan == nil {
+ return fmt.Errorf("PlanScan did not find a plan")
+ }
+ return scanPlan.Scan(src, dst)
+}
+
+func codecDecodeToTextFormat(codec Codec, m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if format == TextFormatCode {
+ return string(src), nil
+ } else {
+ value, err := codec.DecodeValue(m, oid, format, src)
+ if err != nil {
+ return nil, err
+ }
+ buf, err := m.Encode(oid, TextFormatCode, value, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), nil
+ }
+}
+
+// PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+// found then nil is returned.
+func (m *Map) PlanEncode(oid uint32, format int16, value any) EncodePlan {
+ return m.planEncodeDepth(oid, format, value, 0)
+}
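+
+// Plans are memoized per (OID, Go type, format) below, so a plan can also be
+// looked up once and reused for many values of the same type. A minimal sketch:
+//
+//	m := NewMap()
+//	plan := m.PlanEncode(Int8OID, BinaryFormatCode, int64(0))
+//	buf, err := plan.Encode(int64(42), nil) // 8 big-endian bytes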
+
+func (m *Map) planEncodeDepth(oid uint32, format int16, value any, depth int) EncodePlan {
+ // Guard against infinite recursion.
+ if depth > 8 {
+ return nil
+ }
+
+ oidMemo := m.memoizedEncodePlans[oid]
+ if oidMemo == nil {
+ oidMemo = make(map[reflect.Type][2]EncodePlan)
+ m.memoizedEncodePlans[oid] = oidMemo
+ }
+ targetReflectType := reflect.TypeOf(value)
+ typeMemo := oidMemo[targetReflectType]
+ plan := typeMemo[format]
+ if plan == nil {
+ plan = m.planEncode(oid, format, value, depth)
+ typeMemo[format] = plan
+ oidMemo[targetReflectType] = typeMemo
+ }
+
+ return plan
+}
+
+func (m *Map) planEncode(oid uint32, format int16, value any, depth int) EncodePlan {
+ if format == TextFormatCode {
+ switch value.(type) {
+ case string:
+ return encodePlanStringToAnyTextFormat{}
+ case TextValuer:
+ return encodePlanTextValuerToAnyTextFormat{}
+ }
+ }
+
+ var dt *Type
+ if dataType, ok := m.TypeForOID(oid); ok {
+ dt = dataType
+ } else {
+ // If no type for the OID was found, then either it is unknowable (e.g. the simple protocol) or it is an
+ // unregistered type. In either case try to find the type and OID that matches the value (e.g. a []byte would be
+ // registered to PostgreSQL bytea).
+ if dataType, ok := m.TypeForValue(value); ok {
+ dt = dataType
+ oid = dt.OID // Preserve assumed OID in case we are recursively called below.
+ }
+ }
+
+ if dt != nil {
+ if plan := dt.Codec.PlanEncode(m, oid, format, value); plan != nil {
+ return plan
+ }
+ }
+
+ for _, f := range m.TryWrapEncodePlanFuncs {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := m.planEncodeDepth(oid, format, nextValue, depth+1); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+ }
+
+ return nil
+}
+
+type encodePlanStringToAnyTextFormat struct{}
+
+func (encodePlanStringToAnyTextFormat) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(string)
+ return append(buf, s...), nil
+}
+
+type encodePlanTextValuerToAnyTextFormat struct{}
+
+func (encodePlanTextValuerToAnyTextFormat) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ return append(buf, t.String...), nil
+}
+
+type encodePlanDriverValuer struct {
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *encodePlanDriverValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ dv := value.(driver.Valuer)
+ if dv == nil {
+ return nil, nil
+ }
+ v, err := dv.Value()
+ if err != nil {
+ return nil, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+
+ newBuf, err = plan.m.Encode(plan.oid, plan.formatCode, v, buf)
+ if err == nil {
+ return newBuf, nil
+ }
+
+ s, ok := v.(string)
+ if !ok {
+ return nil, err
+ }
+
+ var scannedValue any
+ scanErr := plan.m.Scan(plan.oid, TextFormatCode, []byte(s), &scannedValue)
+ if scanErr != nil {
+ return nil, err
+ }
+
+ // Prevent infinite loop. We can't encode this. See https://github.com/jackc/pgx/issues/1331.
+ if reflect.TypeOf(value) == reflect.TypeOf(scannedValue) {
+ return nil, fmt.Errorf("tried to encode %v via encoding to text and scanning but failed due to receiving same type back", value)
+ }
+
+ var err2 error
+ newBuf, err2 = plan.m.Encode(plan.oid, BinaryFormatCode, scannedValue, buf)
+ if err2 != nil {
+ return nil, err
+ }
+
+ return newBuf, nil
+}
+
+// TryWrapEncodePlanFunc is a function that tries to create a wrapper plan for value. If successful it returns a plan
+// that will convert the value passed to Encode and then call the next plan. nextValue is value as it will be converted
+// by plan. It must be used to find another suitable EncodePlan. When it is found SetNext must be called on plan for it
+// to be usable. ok indicates whether a suitable wrapper was found.
+type TryWrapEncodePlanFunc func(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool)
+
+type derefPointerEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *derefPointerEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *derefPointerEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ptr := reflect.ValueOf(value)
+
+ if ptr.IsNil() {
+ return nil, nil
+ }
+
+ return plan.next.Encode(ptr.Elem().Interface(), buf)
+}
+
+// TryWrapDerefPointerEncodePlan tries to dereference a pointer. e.g. If value was of type *string then a wrapper plan
+// would be returned that dereferences the value.
+func TryWrapDerefPointerEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Ptr {
+ return &derefPointerEncodePlan{}, reflect.New(valueType.Elem()).Elem().Interface(), true
+ }
+
+ return nil, nil, false
+}
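+
+// For example (illustrative): encoding a *string routes through derefPointerEncodePlan
+// and then the plan chosen for string, while a nil *string encodes as SQL NULL:
+//
+//	m := NewMap()
+//	s := "abc"
+//	buf, _ := m.Encode(TextOID, TextFormatCode, &s, nil)            // []byte("abc")
+//	buf, _ = m.Encode(TextOID, TextFormatCode, (*string)(nil), nil) // nil, i.e. NULL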
+
+var kindToTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
+ reflect.Int: reflect.TypeOf(int(0)),
+ reflect.Int8: reflect.TypeOf(int8(0)),
+ reflect.Int16: reflect.TypeOf(int16(0)),
+ reflect.Int32: reflect.TypeOf(int32(0)),
+ reflect.Int64: reflect.TypeOf(int64(0)),
+ reflect.Uint: reflect.TypeOf(uint(0)),
+ reflect.Uint8: reflect.TypeOf(uint8(0)),
+ reflect.Uint16: reflect.TypeOf(uint16(0)),
+ reflect.Uint32: reflect.TypeOf(uint32(0)),
+ reflect.Uint64: reflect.TypeOf(uint64(0)),
+ reflect.Float32: reflect.TypeOf(float32(0)),
+ reflect.Float64: reflect.TypeOf(float64(0)),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Bool: reflect.TypeOf(false),
+}
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+type underlyingTypeEncodePlan struct {
+ nextValueType reflect.Type
+ next EncodePlan
+}
+
+func (plan *underlyingTypeEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *underlyingTypeEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(reflect.ValueOf(value).Convert(plan.nextValueType).Interface(), buf)
+}
+
+// TryWrapFindUnderlyingTypeEncodePlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
+// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
+func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if value == nil {
+ return nil, nil, false
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if _, ok := value.(SkipUnderlyingTypePlanner); ok {
+ return nil, nil, false
+ }
+
+ refValue := reflect.ValueOf(value)
+
+ nextValueType := kindToTypes[refValue.Kind()]
+ if nextValueType != nil && refValue.Type() != nextValueType {
+ return &underlyingTypeEncodePlan{nextValueType: nextValueType}, refValue.Convert(nextValueType).Interface(), true
+ }
+
+ // []byte is a special case. It is a slice but we treat it as a scalar type. In the case of a named type like
+ // json.RawMessage which is defined as []byte the underlying type should be considered as []byte. But any other slice
+ // does not have a special underlying type.
+ //
+ // https://github.com/jackc/pgx/issues/1763
+ if refValue.Type() != byteSliceType && refValue.Type().AssignableTo(byteSliceType) {
+ return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
+ }
+
+ // Get underlying type of any array.
+ // https://github.com/jackc/pgx/issues/2107
+ if refValue.Kind() == reflect.Array {
+ underlyingArrayType := reflect.ArrayOf(refValue.Len(), refValue.Type().Elem())
+ if refValue.Type() != underlyingArrayType {
+ return &underlyingTypeEncodePlan{nextValueType: underlyingArrayType}, refValue.Convert(underlyingArrayType).Interface(), true
+ }
+ }
+
+ return nil, nil, false
+}
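+
+// For example (illustrative): a named type whose underlying type is a builtin is
+// converted to that builtin before encoding, so this behaves like encoding a plain string:
+//
+//	type myString string
+//	m := NewMap()
+//	buf, err := m.Encode(TextOID, TextFormatCode, myString("abc"), nil) // []byte("abc")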
+
+type WrappedEncodePlanNextSetter interface {
+ SetNext(EncodePlan)
+ EncodePlan
+}
+
+// TryWrapBuiltinTypeEncodePlan tries to wrap a builtin type with a wrapper that provides additional methods. e.g. If
+// value was of type int32 then a wrapper plan would be returned that converts value to a type that implements
+// Int64Valuer.
+func TryWrapBuiltinTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ switch value := value.(type) {
+ case int8:
+ return &wrapInt8EncodePlan{}, int8Wrapper(value), true
+ case int16:
+ return &wrapInt16EncodePlan{}, int16Wrapper(value), true
+ case int32:
+ return &wrapInt32EncodePlan{}, int32Wrapper(value), true
+ case int64:
+ return &wrapInt64EncodePlan{}, int64Wrapper(value), true
+ case int:
+ return &wrapIntEncodePlan{}, intWrapper(value), true
+ case uint8:
+ return &wrapUint8EncodePlan{}, uint8Wrapper(value), true
+ case uint16:
+ return &wrapUint16EncodePlan{}, uint16Wrapper(value), true
+ case uint32:
+ return &wrapUint32EncodePlan{}, uint32Wrapper(value), true
+ case uint64:
+ return &wrapUint64EncodePlan{}, uint64Wrapper(value), true
+ case uint:
+ return &wrapUintEncodePlan{}, uintWrapper(value), true
+ case float32:
+ return &wrapFloat32EncodePlan{}, float32Wrapper(value), true
+ case float64:
+ return &wrapFloat64EncodePlan{}, float64Wrapper(value), true
+ case string:
+ return &wrapStringEncodePlan{}, stringWrapper(value), true
+ case time.Time:
+ return &wrapTimeEncodePlan{}, timeWrapper(value), true
+ case time.Duration:
+ return &wrapDurationEncodePlan{}, durationWrapper(value), true
+ case net.IPNet:
+ return &wrapNetIPNetEncodePlan{}, netIPNetWrapper(value), true
+ case net.IP:
+ return &wrapNetIPEncodePlan{}, netIPWrapper(value), true
+ case netip.Prefix:
+ return &wrapNetipPrefixEncodePlan{}, netipPrefixWrapper(value), true
+ case netip.Addr:
+ return &wrapNetipAddrEncodePlan{}, netipAddrWrapper(value), true
+ case map[string]*string:
+ return &wrapMapStringToPointerStringEncodePlan{}, mapStringToPointerStringWrapper(value), true
+ case map[string]string:
+ return &wrapMapStringToStringEncodePlan{}, mapStringToStringWrapper(value), true
+ case [16]byte:
+ return &wrapByte16EncodePlan{}, byte16Wrapper(value), true
+ case []byte:
+ return &wrapByteSliceEncodePlan{}, byteSliceWrapper(value), true
+ case fmt.Stringer:
+ return &wrapFmtStringerEncodePlan{}, fmtStringerWrapper{value}, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapInt8EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt8EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt8EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int8Wrapper(value.(int8)), buf)
+}
+
+type wrapInt16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int16Wrapper(value.(int16)), buf)
+}
+
+type wrapInt32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int32Wrapper(value.(int32)), buf)
+}
+
+type wrapInt64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int64Wrapper(value.(int64)), buf)
+}
+
+type wrapIntEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapIntEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapIntEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(intWrapper(value.(int)), buf)
+}
+
+type wrapUint8EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint8EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint8EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint8Wrapper(value.(uint8)), buf)
+}
+
+type wrapUint16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint16Wrapper(value.(uint16)), buf)
+}
+
+type wrapUint32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint32Wrapper(value.(uint32)), buf)
+}
+
+type wrapUint64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint64Wrapper(value.(uint64)), buf)
+}
+
+type wrapUintEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUintEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUintEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uintWrapper(value.(uint)), buf)
+}
+
+type wrapFloat32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFloat32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFloat32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(float32Wrapper(value.(float32)), buf)
+}
+
+type wrapFloat64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFloat64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFloat64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(float64Wrapper(value.(float64)), buf)
+}
+
+type wrapStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(stringWrapper(value.(string)), buf)
+}
+
+type wrapTimeEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapTimeEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapTimeEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(timeWrapper(value.(time.Time)), buf)
+}
+
+type wrapDurationEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapDurationEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapDurationEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(durationWrapper(value.(time.Duration)), buf)
+}
+
+type wrapNetIPNetEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetIPNetEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetIPNetEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netIPNetWrapper(value.(net.IPNet)), buf)
+}
+
+type wrapNetIPEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetIPEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetIPEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netIPWrapper(value.(net.IP)), buf)
+}
+
+type wrapNetipPrefixEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetipPrefixEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetipPrefixEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netipPrefixWrapper(value.(netip.Prefix)), buf)
+}
+
+type wrapNetipAddrEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetipAddrEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetipAddrEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netipAddrWrapper(value.(netip.Addr)), buf)
+}
+
+type wrapMapStringToPointerStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMapStringToPointerStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMapStringToPointerStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(mapStringToPointerStringWrapper(value.(map[string]*string)), buf)
+}
+
+type wrapMapStringToStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMapStringToStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMapStringToStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(mapStringToStringWrapper(value.(map[string]string)), buf)
+}
+
+type wrapByte16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapByte16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapByte16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(byte16Wrapper(value.([16]byte)), buf)
+}
+
+type wrapByteSliceEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapByteSliceEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapByteSliceEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(byteSliceWrapper(value.([]byte)), buf)
+}
+
+type wrapFmtStringerEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFmtStringerEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFmtStringerEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(fmtStringerWrapper{value.(fmt.Stringer)}, buf)
+}
+
+// TryWrapStructEncodePlan tries to wrap a struct with a wrapper that implements CompositeIndexGetter.
+func TryWrapStructEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Struct {
+ exportedFields := getExportedFieldValues(reflect.ValueOf(value))
+ if len(exportedFields) == 0 {
+ return nil, nil, false
+ }
+
+ w := structWrapper{
+ s: value,
+ exportedFields: exportedFields,
+ }
+ return &wrapAnyStructEncodePlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapAnyStructEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapAnyStructEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapAnyStructEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := structWrapper{
+ s: value,
+ exportedFields: getExportedFieldValues(reflect.ValueOf(value)),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func getExportedFieldValues(structValue reflect.Value) []reflect.Value {
+ structType := structValue.Type()
+ exportedFields := make([]reflect.Value, 0, structValue.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ sf := structType.Field(i)
+ if sf.IsExported() {
+ exportedFields = append(exportedFields, structValue.Field(i))
+ }
+ }
+
+ return exportedFields
+}
+
+func TryWrapSliceEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ // Avoid using reflect path for common types.
+ switch value := value.(type) {
+ case []int16:
+ return &wrapSliceEncodePlan[int16]{}, (FlatArray[int16])(value), true
+ case []int32:
+ return &wrapSliceEncodePlan[int32]{}, (FlatArray[int32])(value), true
+ case []int64:
+ return &wrapSliceEncodePlan[int64]{}, (FlatArray[int64])(value), true
+ case []float32:
+ return &wrapSliceEncodePlan[float32]{}, (FlatArray[float32])(value), true
+ case []float64:
+ return &wrapSliceEncodePlan[float64]{}, (FlatArray[float64])(value), true
+ case []string:
+ return &wrapSliceEncodePlan[string]{}, (FlatArray[string])(value), true
+ case []time.Time:
+ return &wrapSliceEncodePlan[time.Time]{}, (FlatArray[time.Time])(value), true
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Slice {
+ w := anySliceArrayReflect{
+ slice: reflect.ValueOf(value),
+ }
+ return &wrapSliceEncodeReflectPlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapSliceEncodePlan[T any] struct {
+ next EncodePlan
+}
+
+func (plan *wrapSliceEncodePlan[T]) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapSliceEncodePlan[T]) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode((FlatArray[T])(value.([]T)), buf)
+}
+
+type wrapSliceEncodeReflectPlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapSliceEncodeReflectPlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapSliceEncodeReflectPlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anySliceArrayReflect{
+ slice: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func TryWrapMultiDimSliceEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ sliceValue := reflect.ValueOf(value)
+ if sliceValue.Kind() == reflect.Slice {
+ valueElemType := sliceValue.Type().Elem()
+
+ if valueElemType.Kind() == reflect.Slice {
+ if !isRagged(sliceValue) {
+ w := anyMultiDimSliceArray{
+ slice: reflect.ValueOf(value),
+ }
+ return &wrapMultiDimSliceEncodePlan{}, &w, true
+ }
+ }
+ }
+
+ return nil, nil, false
+}
+
+type wrapMultiDimSliceEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMultiDimSliceEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMultiDimSliceEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anyMultiDimSliceArray{
+ slice: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(&w, buf)
+}
+
+func TryWrapArrayEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Array {
+ w := anyArrayArrayReflect{
+ array: reflect.ValueOf(value),
+ }
+ return &wrapArrayEncodeReflectPlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapArrayEncodeReflectPlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapArrayEncodeReflectPlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapArrayEncodeReflectPlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anyArrayArrayReflect{
+ array: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func newEncodeError(value any, m *Map, oid uint32, formatCode int16, err error) error {
+ var format string
+ switch formatCode {
+ case TextFormatCode:
+ format = "text"
+ case BinaryFormatCode:
+ format = "binary"
+ default:
+ format = fmt.Sprintf("unknown (%d)", formatCode)
+ }
+
+ var dataTypeName string
+ if t, ok := m.TypeForOID(oid); ok {
+ dataTypeName = t.Name
+ } else {
+ dataTypeName = "unknown type"
+ }
+
+ return fmt.Errorf("unable to encode %#v into %s format for %s (OID %d): %w", value, format, dataTypeName, oid, err)
+}
+
+// Encode appends the encoded bytes of value to buf. If value is the SQL value NULL then append nothing and return
+// (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data
+// written.
+func (m *Map) Encode(oid uint32, formatCode int16, value any, buf []byte) (newBuf []byte, err error) {
+ if isNil, callNilDriverValuer := isNilDriverValuer(value); isNil {
+ if callNilDriverValuer {
+ newBuf, err = (&encodePlanDriverValuer{m: m, oid: oid, formatCode: formatCode}).Encode(value, buf)
+ if err != nil {
+ return nil, newEncodeError(value, m, oid, formatCode, err)
+ }
+
+ return newBuf, nil
+ } else {
+ return nil, nil
+ }
+ }
+
+ plan := m.PlanEncode(oid, formatCode, value)
+ if plan == nil {
+ return nil, newEncodeError(value, m, oid, formatCode, errors.New("cannot find encode plan"))
+ }
+
+ newBuf, err = plan.Encode(value, buf)
+ if err != nil {
+ return nil, newEncodeError(value, m, oid, formatCode, err)
+ }
+
+ return newBuf, nil
+}
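+
+// Illustrative usage (a sketch): encoding values for registered OIDs in both formats.
+//
+//	m := NewMap()
+//	textBuf, _ := m.Encode(TextOID, TextFormatCode, "hello", nil)   // []byte("hello")
+//	binBuf, _ := m.Encode(Int4OID, BinaryFormatCode, int32(7), nil) // 4 big-endian bytes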
+
+// SQLScanner returns a database/sql.Scanner for v. This is necessary for types like Array[T] and Range[T] where the
+// type needs assistance from Map to implement the sql.Scanner interface. It is not necessary for types like Box that
+// implement sql.Scanner directly.
+//
+// This uses the type of v to look up the PostgreSQL OID that v presumably came from. This means v must be registered
+// with m by calling RegisterDefaultPgType.
+func (m *Map) SQLScanner(v any) sql.Scanner {
+ if s, ok := v.(sql.Scanner); ok {
+ return s
+ }
+
+ return &sqlScannerWrapper{m: m, v: v}
+}
+
+type sqlScannerWrapper struct {
+ m *Map
+ v any
+}
+
+func (w *sqlScannerWrapper) Scan(src any) error {
+ t, ok := w.m.TypeForValue(w.v)
+ if !ok {
+ return fmt.Errorf("cannot convert to sql.Scanner: cannot find registered type for %T", w.v)
+ }
+
+ var bufSrc []byte
+ if src != nil {
+ switch src := src.(type) {
+ case string:
+ bufSrc = []byte(src)
+ case []byte:
+ bufSrc = src
+ default:
+			bufSrc = []byte(fmt.Sprint(src))
+ }
+ }
+
+ return w.m.Scan(t.OID, TextFormatCode, bufSrc, w.v)
+}
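+
+// Illustrative usage with database/sql (a sketch; rows is assumed to be a *sql.Rows
+// positioned on a row whose single column is a text[]; []string does not implement
+// sql.Scanner itself, so it needs the Map's assistance):
+//
+//	m := NewMap()
+//	var names []string
+//	err := rows.Scan(m.SQLScanner(&names))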
+
+var valuerReflectType = reflect.TypeFor[driver.Valuer]()
+
+// isNilDriverValuer reports whether value is any kind of nil. callNilDriverValuer reports whether that nil value's
+// driver.Valuer implementation should still be called rather than encoding the value as NULL. A nil *T only qualifies
+// when driver.Valuer is implemented on *T itself, not when it is only implemented by T.
+func isNilDriverValuer(value any) (isNil bool, callNilDriverValuer bool) {
+ if value == nil {
+ return true, false
+ }
+
+ refVal := reflect.ValueOf(value)
+ kind := refVal.Kind()
+ switch kind {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+ if !refVal.IsNil() {
+ return false, false
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ if kind == reflect.Ptr {
+ // The type assertion will succeed if driver.Valuer is implemented on T or *T. Check if it is implemented on *T
+				// by checking that it is not implemented on T.
+ return true, !refVal.Type().Elem().Implements(valuerReflectType)
+ } else {
+ return true, true
+ }
+ }
+
+ return true, false
+ default:
+ return false, false
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
new file mode 100644
index 0000000..5648d89
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
@@ -0,0 +1,248 @@
+package pgtype
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "net"
+ "net/netip"
+ "reflect"
+ "sync"
+ "time"
+)
+
+var (
+ // defaultMap contains default mappings between PostgreSQL server types and Go type handling logic.
+ defaultMap *Map
+ defaultMapInitOnce = sync.Once{}
+)
+
+func initDefaultMap() {
+ defaultMap = &Map{
+ oidToType: make(map[uint32]*Type),
+ nameToType: make(map[string]*Type),
+ reflectTypeToName: make(map[reflect.Type]string),
+ oidToFormatCode: make(map[uint32]int16),
+
+ memoizedEncodePlans: make(map[uint32]map[reflect.Type][2]EncodePlan),
+
+ TryWrapEncodePlanFuncs: []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapBuiltinTypeEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ TryWrapStructEncodePlan,
+ TryWrapSliceEncodePlan,
+ TryWrapMultiDimSliceEncodePlan,
+ TryWrapArrayEncodePlan,
+ },
+
+ TryWrapScanPlanFuncs: []TryWrapScanPlanFunc{
+ TryPointerPointerScanPlan,
+ TryWrapBuiltinTypeScanPlan,
+ TryFindUnderlyingTypeScanPlan,
+ TryWrapStructScanPlan,
+ TryWrapPtrSliceScanPlan,
+ TryWrapPtrMultiDimSliceScanPlan,
+ TryWrapPtrArrayScanPlan,
+ },
+ }
+
+ // Base types
+ defaultMap.RegisterType(&Type{Name: "aclitem", OID: ACLItemOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
+ defaultMap.RegisterType(&Type{Name: "bit", OID: BitOID, Codec: BitsCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bool", OID: BoolOID, Codec: BoolCodec{}})
+ defaultMap.RegisterType(&Type{Name: "box", OID: BoxOID, Codec: BoxCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bpchar", OID: BPCharOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bytea", OID: ByteaOID, Codec: ByteaCodec{}})
+ defaultMap.RegisterType(&Type{Name: "char", OID: QCharOID, Codec: QCharCodec{}})
+ defaultMap.RegisterType(&Type{Name: "cid", OID: CIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "cidr", OID: CIDROID, Codec: InetCodec{}})
+ defaultMap.RegisterType(&Type{Name: "circle", OID: CircleOID, Codec: CircleCodec{}})
+ defaultMap.RegisterType(&Type{Name: "date", OID: DateOID, Codec: DateCodec{}})
+ defaultMap.RegisterType(&Type{Name: "float4", OID: Float4OID, Codec: Float4Codec{}})
+ defaultMap.RegisterType(&Type{Name: "float8", OID: Float8OID, Codec: Float8Codec{}})
+ defaultMap.RegisterType(&Type{Name: "inet", OID: InetOID, Codec: InetCodec{}})
+ defaultMap.RegisterType(&Type{Name: "int2", OID: Int2OID, Codec: Int2Codec{}})
+ defaultMap.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}})
+ defaultMap.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}})
+ defaultMap.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}})
+ defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: &JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
+ defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: &JSONBCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
+ defaultMap.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
+ defaultMap.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}})
+ defaultMap.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}})
+ defaultMap.RegisterType(&Type{Name: "macaddr8", OID: Macaddr8OID, Codec: MacaddrCodec{}})
+ defaultMap.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}})
+ defaultMap.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}})
+ defaultMap.RegisterType(&Type{Name: "oid", OID: OIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "path", OID: PathOID, Codec: PathCodec{}})
+ defaultMap.RegisterType(&Type{Name: "point", OID: PointOID, Codec: PointCodec{}})
+ defaultMap.RegisterType(&Type{Name: "polygon", OID: PolygonOID, Codec: PolygonCodec{}})
+ defaultMap.RegisterType(&Type{Name: "record", OID: RecordOID, Codec: RecordCodec{}})
+ defaultMap.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}})
+ defaultMap.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}})
+ defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: &TimestampCodec{}})
+ defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: &TimestamptzCodec{}})
+ defaultMap.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}})
+ defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
+ defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "xid8", OID: XID8OID, Codec: Uint64Codec{}})
+ defaultMap.RegisterType(&Type{Name: "xml", OID: XMLOID, Codec: &XMLCodec{
+ Marshal: xml.Marshal,
+ // xml.Unmarshal does not support unmarshalling into *any. However, XMLCodec.DecodeValue calls Unmarshal with a
+		// *any. Wrap xml.Unmarshal with a function that copies the data into a new byte slice in this case. Not implementing
+ // directly in XMLCodec.DecodeValue to allow for the unlikely possibility that someone uses an alternative XML
+ // unmarshaler that does support unmarshalling into *any.
+ //
+ // https://github.com/jackc/pgx/issues/2227
+ // https://github.com/jackc/pgx/pull/2228
+ Unmarshal: func(data []byte, v any) error {
+ if v, ok := v.(*any); ok {
+ dstBuf := make([]byte, len(data))
+ copy(dstBuf, data)
+ *v = dstBuf
+ return nil
+ }
+ return xml.Unmarshal(data, v)
+ },
+ }})
+
+ // Range types
+ defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
+ defaultMap.RegisterType(&Type{Name: "int4range", OID: Int4rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int4OID]}})
+ defaultMap.RegisterType(&Type{Name: "int8range", OID: Int8rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int8OID]}})
+ defaultMap.RegisterType(&Type{Name: "numrange", OID: NumrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[NumericOID]}})
+ defaultMap.RegisterType(&Type{Name: "tsrange", OID: TsrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
+ defaultMap.RegisterType(&Type{Name: "tstzrange", OID: TstzrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
+
+ // Multirange types
+ defaultMap.RegisterType(&Type{Name: "datemultirange", OID: DatemultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "int4multirange", OID: Int4multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "int8multirange", OID: Int8multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "nummultirange", OID: NummultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "tsmultirange", OID: TsmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "tstzmultirange", OID: TstzmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
+
+ // Array types
+ defaultMap.RegisterType(&Type{Name: "_aclitem", OID: ACLItemArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ACLItemOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bit", OID: BitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BitOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bool", OID: BoolArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoolOID]}})
+ defaultMap.RegisterType(&Type{Name: "_box", OID: BoxArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoxOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bpchar", OID: BPCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BPCharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bytea", OID: ByteaArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ByteaOID]}})
+ defaultMap.RegisterType(&Type{Name: "_char", OID: QCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[QCharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_cid", OID: CIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_cidr", OID: CIDRArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDROID]}})
+ defaultMap.RegisterType(&Type{Name: "_circle", OID: CircleArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CircleOID]}})
+ defaultMap.RegisterType(&Type{Name: "_date", OID: DateArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DateOID]}})
+ defaultMap.RegisterType(&Type{Name: "_daterange", OID: DaterangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_float4", OID: Float4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float4OID]}})
+ defaultMap.RegisterType(&Type{Name: "_float8", OID: Float8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float8OID]}})
+ defaultMap.RegisterType(&Type{Name: "_inet", OID: InetArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[InetOID]}})
+ defaultMap.RegisterType(&Type{Name: "_int2", OID: Int2ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int2OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int4", OID: Int4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int4range", OID: Int4rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_int8", OID: Int8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int8range", OID: Int8rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_interval", OID: IntervalArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[IntervalOID]}})
+ defaultMap.RegisterType(&Type{Name: "_json", OID: JSONArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONOID]}})
+ defaultMap.RegisterType(&Type{Name: "_jsonb", OID: JSONBArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONBOID]}})
+ defaultMap.RegisterType(&Type{Name: "_jsonpath", OID: JSONPathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONPathOID]}})
+ defaultMap.RegisterType(&Type{Name: "_line", OID: LineArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LineOID]}})
+ defaultMap.RegisterType(&Type{Name: "_lseg", OID: LsegArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LsegOID]}})
+ defaultMap.RegisterType(&Type{Name: "_macaddr", OID: MacaddrArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[MacaddrOID]}})
+ defaultMap.RegisterType(&Type{Name: "_name", OID: NameArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NameOID]}})
+ defaultMap.RegisterType(&Type{Name: "_numeric", OID: NumericArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumericOID]}})
+ defaultMap.RegisterType(&Type{Name: "_numrange", OID: NumrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_oid", OID: OIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[OIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_path", OID: PathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PathOID]}})
+ defaultMap.RegisterType(&Type{Name: "_point", OID: PointArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PointOID]}})
+ defaultMap.RegisterType(&Type{Name: "_polygon", OID: PolygonArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PolygonOID]}})
+ defaultMap.RegisterType(&Type{Name: "_record", OID: RecordArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[RecordOID]}})
+ defaultMap.RegisterType(&Type{Name: "_text", OID: TextArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TextOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tid", OID: TIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_time", OID: TimeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_timestamp", OID: TimestampArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
+ defaultMap.RegisterType(&Type{Name: "_timestamptz", OID: TimestamptzArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tsrange", OID: TsrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tstzrange", OID: TstzrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_uuid", OID: UUIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[UUIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
+ defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_xid8", OID: XID8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XID8OID]}})
+ defaultMap.RegisterType(&Type{Name: "_xml", OID: XMLArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XMLOID]}})
+
+ // Integer types that directly map to a PostgreSQL type
+ registerDefaultPgTypeVariants[int16](defaultMap, "int2")
+ registerDefaultPgTypeVariants[int32](defaultMap, "int4")
+ registerDefaultPgTypeVariants[int64](defaultMap, "int8")
+
+ // Integer types that do not have a direct match to a PostgreSQL type
+ registerDefaultPgTypeVariants[int8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[int](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint16](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint32](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint64](defaultMap, "numeric")
+ registerDefaultPgTypeVariants[uint](defaultMap, "numeric")
+
+ registerDefaultPgTypeVariants[float32](defaultMap, "float4")
+ registerDefaultPgTypeVariants[float64](defaultMap, "float8")
+
+ registerDefaultPgTypeVariants[bool](defaultMap, "bool")
+ registerDefaultPgTypeVariants[time.Time](defaultMap, "timestamptz")
+ registerDefaultPgTypeVariants[time.Duration](defaultMap, "interval")
+ registerDefaultPgTypeVariants[string](defaultMap, "text")
+ registerDefaultPgTypeVariants[json.RawMessage](defaultMap, "json")
+ registerDefaultPgTypeVariants[[]byte](defaultMap, "bytea")
+
+ registerDefaultPgTypeVariants[net.IP](defaultMap, "inet")
+ registerDefaultPgTypeVariants[net.IPNet](defaultMap, "cidr")
+ registerDefaultPgTypeVariants[netip.Addr](defaultMap, "inet")
+ registerDefaultPgTypeVariants[netip.Prefix](defaultMap, "cidr")
+
+ // pgtype provided structs
+ registerDefaultPgTypeVariants[Bits](defaultMap, "varbit")
+ registerDefaultPgTypeVariants[Bool](defaultMap, "bool")
+ registerDefaultPgTypeVariants[Box](defaultMap, "box")
+ registerDefaultPgTypeVariants[Circle](defaultMap, "circle")
+ registerDefaultPgTypeVariants[Date](defaultMap, "date")
+ registerDefaultPgTypeVariants[Range[Date]](defaultMap, "daterange")
+ registerDefaultPgTypeVariants[Multirange[Range[Date]]](defaultMap, "datemultirange")
+ registerDefaultPgTypeVariants[Float4](defaultMap, "float4")
+ registerDefaultPgTypeVariants[Float8](defaultMap, "float8")
+ registerDefaultPgTypeVariants[Range[Float8]](defaultMap, "numrange") // There is no PostgreSQL builtin float8range so map it to numrange.
+ registerDefaultPgTypeVariants[Multirange[Range[Float8]]](defaultMap, "nummultirange") // There is no PostgreSQL builtin float8multirange so map it to nummultirange.
+ registerDefaultPgTypeVariants[Int2](defaultMap, "int2")
+ registerDefaultPgTypeVariants[Int4](defaultMap, "int4")
+ registerDefaultPgTypeVariants[Range[Int4]](defaultMap, "int4range")
+ registerDefaultPgTypeVariants[Multirange[Range[Int4]]](defaultMap, "int4multirange")
+ registerDefaultPgTypeVariants[Int8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[Range[Int8]](defaultMap, "int8range")
+ registerDefaultPgTypeVariants[Multirange[Range[Int8]]](defaultMap, "int8multirange")
+ registerDefaultPgTypeVariants[Interval](defaultMap, "interval")
+ registerDefaultPgTypeVariants[Line](defaultMap, "line")
+ registerDefaultPgTypeVariants[Lseg](defaultMap, "lseg")
+ registerDefaultPgTypeVariants[Numeric](defaultMap, "numeric")
+ registerDefaultPgTypeVariants[Range[Numeric]](defaultMap, "numrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Numeric]]](defaultMap, "nummultirange")
+ registerDefaultPgTypeVariants[Path](defaultMap, "path")
+ registerDefaultPgTypeVariants[Point](defaultMap, "point")
+ registerDefaultPgTypeVariants[Polygon](defaultMap, "polygon")
+ registerDefaultPgTypeVariants[TID](defaultMap, "tid")
+ registerDefaultPgTypeVariants[Text](defaultMap, "text")
+ registerDefaultPgTypeVariants[Time](defaultMap, "time")
+ registerDefaultPgTypeVariants[Timestamp](defaultMap, "timestamp")
+ registerDefaultPgTypeVariants[Timestamptz](defaultMap, "timestamptz")
+ registerDefaultPgTypeVariants[Range[Timestamp]](defaultMap, "tsrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Timestamp]]](defaultMap, "tsmultirange")
+ registerDefaultPgTypeVariants[Range[Timestamptz]](defaultMap, "tstzrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Timestamptz]]](defaultMap, "tstzmultirange")
+ registerDefaultPgTypeVariants[UUID](defaultMap, "uuid")
+
+ defaultMap.buildReflectTypeToType()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/point.go b/vendor/github.com/jackc/pgx/v5/pgtype/point.go
new file mode 100644
index 0000000..09b19bb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/point.go
@@ -0,0 +1,266 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Vec2 struct {
+ X float64
+ Y float64
+}
+
+type PointScanner interface {
+ ScanPoint(v Point) error
+}
+
+type PointValuer interface {
+ PointValue() (Point, error)
+}
+
+type Point struct {
+ P Vec2
+ Valid bool
+}
+
+func (p *Point) ScanPoint(v Point) error {
+ *p = v
+ return nil
+}
+
+func (p Point) PointValue() (Point, error) {
+ return p, nil
+}
+
+func parsePoint(src []byte) (*Point, error) {
+ if src == nil || bytes.Equal(src, []byte("null")) {
+ return &Point{}, nil
+ }
+
+ if len(src) < 5 {
+ return nil, fmt.Errorf("invalid length for point: %v", len(src))
+ }
+ if src[0] == '"' && src[len(src)-1] == '"' {
+ src = src[1 : len(src)-1]
+ }
+ sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return nil, fmt.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(sx, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ y, err := strconv.ParseFloat(sy, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Point{P: Vec2{x, y}, Valid: true}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Point) Scan(src any) error {
+ if src == nil {
+ *dst = Point{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPointScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Point) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := PointCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+func (src Point) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var buff bytes.Buffer
+ buff.WriteByte('"')
+ buff.WriteString(fmt.Sprintf("(%g,%g)", src.P.X, src.P.Y))
+ buff.WriteByte('"')
+ return buff.Bytes(), nil
+}
+
+func (dst *Point) UnmarshalJSON(point []byte) error {
+ p, err := parsePoint(point)
+ if err != nil {
+ return err
+ }
+ *dst = *p
+ return nil
+}
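+
+// Illustrative round trip through database/sql (a sketch; db is an assumed *sql.DB
+// using a driver that returns point columns in their text form):
+//
+//	var p Point
+//	err := db.QueryRow(`select '(1.5,2.5)'::point`).Scan(&p)
+//	// p.Valid == true, p.P == Vec2{X: 1.5, Y: 2.5}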
+
+type PointCodec struct{}
+
+func (PointCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PointCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PointCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PointValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPointCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPointCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPointCodecBinary struct{}
+
+func (encodePlanPointCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ point, err := value.(PointValuer).PointValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !point.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(point.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(point.P.Y))
+ return buf, nil
+}
+
+type encodePlanPointCodecText struct{}
+
+func (encodePlanPointCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ point, err := value.(PointValuer).PointValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !point.Valid {
+ return nil, nil
+ }
+
+ return append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(point.P.X, 'f', -1, 64),
+ strconv.FormatFloat(point.P.Y, 'f', -1, 64),
+ )...), nil
+}
+
+func (PointCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PointScanner:
+ return scanPlanBinaryPointToPointScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PointScanner:
+ return scanPlanTextAnyToPointScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c PointCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PointCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var point Point
+ err := codecScan(c, m, oid, format, src, &point)
+ if err != nil {
+ return nil, err
+ }
+ return point, nil
+}
+
+type scanPlanBinaryPointToPointScanner struct{}
+
+func (scanPlanBinaryPointToPointScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PointScanner)
+
+ if src == nil {
+ return scanner.ScanPoint(Point{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for point: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+
+ return scanner.ScanPoint(Point{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPointScanner struct{}
+
+func (scanPlanTextAnyToPointScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PointScanner)
+
+ if src == nil {
+ return scanner.ScanPoint(Point{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for point: %v", len(src))
+ }
+
+ sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return fmt.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(sx, 64)
+ if err != nil {
+ return err
+ }
+
+ y, err := strconv.ParseFloat(sy, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanPoint(Point{P: Vec2{x, y}, Valid: true})
+}
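
For reference while reviewing this vendored file: the text-format round trip can be exercised without a database, since Point.Scan accepts PostgreSQL's `(x,y)` text form and Value re-encodes it. A minimal sketch, assuming the vendored package is importable as github.com/jackc/pgx/v5/pgtype:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var p pgtype.Point
	// Scan parses PostgreSQL's text representation "(x,y)".
	if err := p.Scan("(1.5,-2.25)"); err != nil {
		panic(err)
	}
	fmt.Println(p.P.X, p.P.Y, p.Valid) // 1.5 -2.25 true

	// Value re-encodes the point in the text format for database/sql.
	v, _ := p.Value()
	fmt.Println(v) // (1.5,-2.25)
}
```
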
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go b/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go
new file mode 100644
index 0000000..04b0ba6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go
@@ -0,0 +1,253 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type PolygonScanner interface {
+ ScanPolygon(v Polygon) error
+}
+
+type PolygonValuer interface {
+ PolygonValue() (Polygon, error)
+}
+
+type Polygon struct {
+ P []Vec2
+ Valid bool
+}
+
+func (p *Polygon) ScanPolygon(v Polygon) error {
+ *p = v
+ return nil
+}
+
+func (p Polygon) PolygonValue() (Polygon, error) {
+ return p, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (p *Polygon) Scan(src any) error {
+ if src == nil {
+ *p = Polygon{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPolygonScanner{}.Scan([]byte(src), p)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (p Polygon) Value() (driver.Value, error) {
+ if !p.Valid {
+ return nil, nil
+ }
+
+ buf, err := PolygonCodec{}.PlanEncode(nil, 0, TextFormatCode, p).Encode(p, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), err
+}
+
+type PolygonCodec struct{}
+
+func (PolygonCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PolygonCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PolygonCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PolygonValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPolygonCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPolygonCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPolygonCodecBinary struct{}
+
+func (encodePlanPolygonCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ polygon, err := value.(PolygonValuer).PolygonValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !polygon.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(polygon.P)))
+
+ for _, p := range polygon.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+type encodePlanPolygonCodecText struct{}
+
+func (encodePlanPolygonCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ polygon, err := value.(PolygonValuer).PolygonValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !polygon.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, '(')
+
+ for i, p := range polygon.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(p.X, 'f', -1, 64),
+ strconv.FormatFloat(p.Y, 'f', -1, 64),
+ )...)
+ }
+
+ buf = append(buf, ')')
+
+ return buf, nil
+}
+
+func (PolygonCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PolygonScanner:
+ return scanPlanBinaryPolygonToPolygonScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PolygonScanner:
+ return scanPlanTextAnyToPolygonScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryPolygonToPolygonScanner struct{}
+
+func (scanPlanBinaryPolygonToPolygonScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PolygonScanner)
+
+ if src == nil {
+ return scanner.ScanPolygon(Polygon{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for polygon: %v", len(src))
+ }
+
+ pointCount := int(binary.BigEndian.Uint32(src))
+ rp := 4
+
+ if 4+pointCount*16 != len(src) {
+ return fmt.Errorf("invalid length for Polygon with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ return scanner.ScanPolygon(Polygon{
+ P: points,
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPolygonScanner struct{}
+
+func (scanPlanTextAnyToPolygonScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PolygonScanner)
+
+ if src == nil {
+ return scanner.ScanPolygon(Polygon{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ return scanner.ScanPolygon(Polygon{P: points, Valid: true})
+}
+
+func (c PolygonCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PolygonCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var polygon Polygon
+ err := codecScan(c, m, oid, format, src, &polygon)
+ if err != nil {
+ return nil, err
+ }
+ return polygon, nil
+}
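
The text scanner above walks PostgreSQL's `((x1,y1),...,(xn,yn))` representation point by point. A small database-free sketch of that path:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var poly pgtype.Polygon
	// Scan accepts the text form PostgreSQL produces for the polygon type.
	if err := poly.Scan("((0,0),(0,1),(1,1))"); err != nil {
		panic(err)
	}
	fmt.Println(len(poly.P), poly.P[2]) // 3 {1 1}
}
```
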
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go b/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go
new file mode 100644
index 0000000..fc40a5b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go
@@ -0,0 +1,141 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "math"
+)
+
+// QCharCodec is for PostgreSQL's special 8-bit-only "char" type, which is more akin to the C
+// language's char type, or Go's byte type. (Note that the name in PostgreSQL
+// itself is "char", in double quotes, and not char.) It is used widely in
+// PostgreSQL's system tables to hold a single ASCII character value (e.g.
+// pg_class.relkind). It is named Qchar, for quoted char, to disambiguate it from the
+// SQL standard type char.
+type QCharCodec struct{}
+
+func (QCharCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (QCharCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (QCharCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case byte:
+ return encodePlanQcharCodecByte{}
+ case rune:
+ return encodePlanQcharCodecRune{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanQcharCodecByte struct{}
+
+func (encodePlanQcharCodecByte) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.(byte)
+ buf = append(buf, b)
+ return buf, nil
+}
+
+type encodePlanQcharCodecRune struct{}
+
+func (encodePlanQcharCodecRune) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ r := value.(rune)
+ if r > math.MaxUint8 {
+ return nil, fmt.Errorf(`%v cannot be encoded to "char"`, r)
+ }
+ b := byte(r)
+ buf = append(buf, b)
+ return buf, nil
+}
+
+func (QCharCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *byte:
+ return scanPlanQcharCodecByte{}
+ case *rune:
+ return scanPlanQcharCodecRune{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanQcharCodecByte struct{}
+
+func (scanPlanQcharCodecByte) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) > 1 {
+ return fmt.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ b := dst.(*byte)
+ // In the text format the zero value arrives as an empty value rather than a 0 byte
+ if len(src) == 0 {
+ *b = 0
+ } else {
+ *b = src[0]
+ }
+
+ return nil
+}
+
+type scanPlanQcharCodecRune struct{}
+
+func (scanPlanQcharCodecRune) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) > 1 {
+ return fmt.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ r := dst.(*rune)
+ // In the text format the zero value arrives as an empty value rather than a 0 byte
+ if len(src) == 0 {
+ *r = 0
+ } else {
+ *r = rune(src[0])
+ }
+
+ return nil
+}
+
+func (c QCharCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r rune
+ err := codecScan(c, m, oid, format, src, &r)
+ if err != nil {
+ return nil, err
+ }
+ return string(r), nil
+}
+
+func (c QCharCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r rune
+ err := codecScan(c, m, oid, format, src, &r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
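
QCharCodec only plans scans into *byte and *rune, so the usual way to exercise it is a query against a system-catalog column of type "char". A hedged sketch, assuming a reachable database whose connection string is in the DATABASE_URL environment variable (the query itself is only illustrative):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed connection string
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// pg_class.relkind has the "char" type; it scans into a byte or rune.
	var relkind rune
	if err := conn.QueryRow(ctx, "select relkind from pg_class limit 1").Scan(&relkind); err != nil {
		panic(err)
	}
	fmt.Printf("relkind = %c\n", relkind)
}
```
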
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/range.go b/vendor/github.com/jackc/pgx/v5/pgtype/range.go
new file mode 100644
index 0000000..16427cc
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/range.go
@@ -0,0 +1,322 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+type BoundType byte
+
+const (
+ Inclusive = BoundType('i')
+ Exclusive = BoundType('e')
+ Unbounded = BoundType('U')
+ Empty = BoundType('E')
+)
+
+func (bt BoundType) String() string {
+ return string(bt)
+}
+
+type untypedTextRange struct {
+ Lower string
+ Upper string
+ LowerType BoundType
+ UpperType BoundType
+}
+
+func parseUntypedTextRange(src string) (*untypedTextRange, error) {
+ utr := &untypedTextRange{}
+ if src == "empty" {
+ utr.LowerType = Empty
+ utr.UpperType = Empty
+ return utr, nil
+ }
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower bound: %w", err)
+ }
+ switch r {
+ case '(':
+ utr.LowerType = Exclusive
+ case '[':
+ utr.LowerType = Inclusive
+ default:
+ return nil, fmt.Errorf("missing lower bound, instead got: %v", string(r))
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower value: %w", err)
+ }
+ buf.UnreadRune()
+
+ if r == ',' {
+ utr.LowerType = Unbounded
+ } else {
+ utr.Lower, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower value: %w", err)
+ }
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("missing range separator: %w", err)
+ }
+ if r != ',' {
+ return nil, fmt.Errorf("missing range separator: %v", r)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid upper value: %w", err)
+ }
+
+ if r == ')' || r == ']' {
+ utr.UpperType = Unbounded
+ } else {
+ buf.UnreadRune()
+ utr.Upper, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid upper value: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("missing upper bound: %w", err)
+ }
+ switch r {
+ case ')':
+ utr.UpperType = Exclusive
+ case ']':
+ utr.UpperType = Inclusive
+ default:
+ return nil, fmt.Errorf("missing upper bound, instead got: %v", string(r))
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ return utr, nil
+}
+
+func rangeParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return rangeParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case ',', '[', ']', '(', ')':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func rangeParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r != '"' {
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ }
+ s.WriteRune(r)
+ }
+}
+
+type untypedBinaryRange struct {
+ Lower []byte
+ Upper []byte
+ LowerType BoundType
+ UpperType BoundType
+}
+
+// 0 = () = 00000
+// 1 = empty = 00001
+// 2 = [) = 00010
+// 4 = (] = 00100
+// 6 = [] = 00110
+// 8 = ) = 01000
+// 12 = ] = 01100
+// 16 = ( = 10000
+// 18 = [ = 10010
+// 24 = = 11000
+
+const emptyMask = 1
+const lowerInclusiveMask = 2
+const upperInclusiveMask = 4
+const lowerUnboundedMask = 8
+const upperUnboundedMask = 16
+
+func parseUntypedBinaryRange(src []byte) (*untypedBinaryRange, error) {
+ ubr := &untypedBinaryRange{}
+
+ if len(src) == 0 {
+ return nil, fmt.Errorf("range too short: %v", len(src))
+ }
+
+ rangeType := src[0]
+ rp := 1
+
+ if rangeType&emptyMask > 0 {
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing empty range: %v", len(src[rp:]))
+ }
+ ubr.LowerType = Empty
+ ubr.UpperType = Empty
+ return ubr, nil
+ }
+
+ if rangeType&lowerInclusiveMask > 0 {
+ ubr.LowerType = Inclusive
+ } else if rangeType&lowerUnboundedMask > 0 {
+ ubr.LowerType = Unbounded
+ } else {
+ ubr.LowerType = Exclusive
+ }
+
+ if rangeType&upperInclusiveMask > 0 {
+ ubr.UpperType = Inclusive
+ } else if rangeType&upperUnboundedMask > 0 {
+ ubr.UpperType = Unbounded
+ } else {
+ ubr.UpperType = Exclusive
+ }
+
+ if ubr.LowerType == Unbounded && ubr.UpperType == Unbounded {
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing unbounded range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if len(src[rp:]) < 4 {
+ return nil, fmt.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ val := src[rp : rp+valueLen]
+ rp += valueLen
+
+ if ubr.LowerType != Unbounded {
+ ubr.Lower = val
+ } else {
+ ubr.Upper = val
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if ubr.UpperType != Unbounded {
+ if len(src[rp:]) < 4 {
+ return nil, fmt.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ ubr.Upper = src[rp : rp+valueLen]
+ rp += valueLen
+ }
+
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+
+ return ubr, nil
+
+}
+
+// Range is a generic range type.
+type Range[T any] struct {
+ Lower T
+ Upper T
+ LowerType BoundType
+ UpperType BoundType
+ Valid bool
+}
+
+func (r Range[T]) IsNull() bool {
+ return !r.Valid
+}
+
+func (r Range[T]) BoundTypes() (lower, upper BoundType) {
+ return r.LowerType, r.UpperType
+}
+
+func (r Range[T]) Bounds() (lower, upper any) {
+ return &r.Lower, &r.Upper
+}
+
+func (r *Range[T]) ScanNull() error {
+ *r = Range[T]{}
+ return nil
+}
+
+func (r *Range[T]) ScanBounds() (lowerTarget, upperTarget any) {
+ return &r.Lower, &r.Upper
+}
+
+func (r *Range[T]) SetBoundTypes(lower, upper BoundType) error {
+ if lower == Unbounded || lower == Empty {
+ var zero T
+ r.Lower = zero
+ }
+ if upper == Unbounded || upper == Empty {
+ var zero T
+ r.Upper = zero
+ }
+ r.LowerType = lower
+ r.UpperType = upper
+ r.Valid = true
+ return nil
+}
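
Range[T] is a plain value type; the bound types carry most of the semantics. A short database-free sketch showing a half-open range and the empty case (SetBoundTypes resets the bound values for Empty and Unbounded bounds):

```go
package main

import (
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// A half-open range [2024-01-01, 2024-02-01).
	r := pgtype.Range[pgtype.Timestamp]{
		Lower:     pgtype.Timestamp{Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Valid: true},
		Upper:     pgtype.Timestamp{Time: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC), Valid: true},
		LowerType: pgtype.Inclusive,
		UpperType: pgtype.Exclusive,
		Valid:     true,
	}
	lower, upper := r.BoundTypes()
	fmt.Println(r.IsNull(), lower, upper) // false i e

	// An empty range: SetBoundTypes zeroes both bound values.
	var empty pgtype.Range[pgtype.Timestamp]
	_ = empty.SetBoundTypes(pgtype.Empty, pgtype.Empty)
	fmt.Println(empty.Valid, empty.LowerType, empty.UpperType) // true E E
}
```
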
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go
new file mode 100644
index 0000000..684f1bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go
@@ -0,0 +1,379 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// RangeValuer is a type that can be converted into a PostgreSQL range.
+type RangeValuer interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // BoundTypes returns the lower and upper bound types.
+ BoundTypes() (lower, upper BoundType)
+
+ // Bounds returns the lower and upper range values.
+ Bounds() (lower, upper any)
+}
+
+// RangeScanner is a type that can be scanned from a PostgreSQL range.
+type RangeScanner interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // ScanBounds returns values usable as a scan target. The returned values may not be scanned if the range is empty or
+ // the bound type is unbounded.
+ ScanBounds() (lowerTarget, upperTarget any)
+
+ // SetBoundTypes sets the lower and upper bound types. ScanBounds will be called and the returned values scanned
+ // (if appropriate) before SetBoundTypes is called. If the bound types are unbounded or empty this method must
+ // also set the bound values.
+ SetBoundTypes(lower, upper BoundType) error
+}
+
+// RangeCodec is a codec for any range type.
+type RangeCodec struct {
+ ElementType *Type
+}
+
+func (c *RangeCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *RangeCodec) PreferredFormat() int16 {
+ if c.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *RangeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(RangeValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanRangeCodecRangeValuerToBinary{rc: c, m: m}
+ case TextFormatCode:
+ return &encodePlanRangeCodecRangeValuerToText{rc: c, m: m}
+ }
+
+ return nil
+}
+
+type encodePlanRangeCodecRangeValuerToBinary struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(RangeValuer)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ lowerType, upperType := getter.BoundTypes()
+ lower, upper := getter.Bounds()
+
+ var rangeType byte
+ switch lowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, fmt.Errorf("unknown LowerType: %v", lowerType)
+ }
+
+ switch upperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, fmt.Errorf("unknown UpperType: %v", upperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ if lowerType != Unbounded {
+ if lower == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ lowerPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, BinaryFormatCode, lower)
+ if lowerPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", lower)
+ }
+
+ buf, err = lowerPlan.Encode(lower, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if upperType != Unbounded {
+ if upper == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ upperPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, BinaryFormatCode, upper)
+ if upperPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", upper)
+ }
+
+ buf, err = upperPlan.Encode(upper, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+type encodePlanRangeCodecRangeValuerToText struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(RangeValuer)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ lowerType, upperType := getter.BoundTypes()
+ lower, upper := getter.Bounds()
+
+ switch lowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, fmt.Errorf("unknown lower bound type %v", lowerType)
+ }
+
+ if lowerType != Unbounded {
+ if lower == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ lowerPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, lower)
+ if lowerPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", lower)
+ }
+
+ buf, err = lowerPlan.Encode(lower, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if upperType != Unbounded {
+ if upper == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ upperPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, upper)
+ if upperPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", upper)
+ }
+
+ buf, err = upperPlan.Encode(upper, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+ }
+
+ switch upperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, fmt.Errorf("unknown upper bound type %v", upperType)
+ }
+
+ return buf, nil
+}
+
+func (c *RangeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case RangeScanner:
+ return &scanPlanBinaryRangeToRangeScanner{rc: c, m: m}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case RangeScanner:
+ return &scanPlanTextRangeToRangeScanner{rc: c, m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryRangeToRangeScanner struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *scanPlanBinaryRangeToRangeScanner) Scan(src []byte, target any) error {
+ rangeScanner := (target).(RangeScanner)
+
+ if src == nil {
+ return rangeScanner.ScanNull()
+ }
+
+ ubr, err := parseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ if ubr.LowerType == Empty {
+ return rangeScanner.SetBoundTypes(ubr.LowerType, ubr.UpperType)
+ }
+
+ lowerTarget, upperTarget := rangeScanner.ScanBounds()
+
+ if ubr.LowerType == Inclusive || ubr.LowerType == Exclusive {
+ lowerPlan := plan.m.PlanScan(plan.rc.ElementType.OID, BinaryFormatCode, lowerTarget)
+ if lowerPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", lowerTarget)
+ }
+
+ err = lowerPlan.Scan(ubr.Lower, lowerTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
+ }
+ }
+
+ if ubr.UpperType == Inclusive || ubr.UpperType == Exclusive {
+ upperPlan := plan.m.PlanScan(plan.rc.ElementType.OID, BinaryFormatCode, upperTarget)
+ if upperPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", upperTarget)
+ }
+
+ err = upperPlan.Scan(ubr.Upper, upperTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
+ }
+ }
+
+ return rangeScanner.SetBoundTypes(ubr.LowerType, ubr.UpperType)
+}
+
+type scanPlanTextRangeToRangeScanner struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *scanPlanTextRangeToRangeScanner) Scan(src []byte, target any) error {
+ rangeScanner := (target).(RangeScanner)
+
+ if src == nil {
+ return rangeScanner.ScanNull()
+ }
+
+ utr, err := parseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ if utr.LowerType == Empty {
+ return rangeScanner.SetBoundTypes(utr.LowerType, utr.UpperType)
+ }
+
+ lowerTarget, upperTarget := rangeScanner.ScanBounds()
+
+ if utr.LowerType == Inclusive || utr.LowerType == Exclusive {
+ lowerPlan := plan.m.PlanScan(plan.rc.ElementType.OID, TextFormatCode, lowerTarget)
+ if lowerPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", lowerTarget)
+ }
+
+ err = lowerPlan.Scan([]byte(utr.Lower), lowerTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
+ }
+ }
+
+ if utr.UpperType == Inclusive || utr.UpperType == Exclusive {
+ upperPlan := plan.m.PlanScan(plan.rc.ElementType.OID, TextFormatCode, upperTarget)
+ if upperPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", upperTarget)
+ }
+
+ err = upperPlan.Scan([]byte(utr.Upper), upperTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
+ }
+ }
+
+ return rangeScanner.SetBoundTypes(utr.LowerType, utr.UpperType)
+}
+
+func (c *RangeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *RangeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r Range[any]
+ err := c.PlanScan(m, oid, format, &r).Scan(src, &r)
+ return r, err
+}
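
RangeCodec delegates the bound values to the element type's codec, so a Range[T] value can be passed as a query argument and scanned straight back. A hedged round-trip sketch, assuming a reachable database via DATABASE_URL:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed connection string
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	in := pgtype.Range[pgtype.Timestamp]{
		Lower:     pgtype.Timestamp{Time: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), Valid: true},
		Upper:     pgtype.Timestamp{Time: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC), Valid: true},
		LowerType: pgtype.Inclusive,
		UpperType: pgtype.Exclusive,
		Valid:     true,
	}

	// The range header is written by RangeCodec; each bound is encoded by the
	// element type's codec (timestamp here), then scanned back the same way.
	var out pgtype.Range[pgtype.Timestamp]
	if err := conn.QueryRow(ctx, "select $1::tsrange", in).Scan(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.LowerType, out.UpperType, out.Lower.Time, out.Upper.Time)
}
```
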
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go
new file mode 100644
index 0000000..b3b1660
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go
@@ -0,0 +1,125 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// ArrayGetter is a type that can be converted into a PostgreSQL array.
+
+// RecordCodec is a codec for the generic PostgreSQL record type, such as is created with the "row" function. Record can
+// only decode the binary format. The text format output from PostgreSQL does not include type information and is
+// therefore impossible to decode. Encoding is impossible because PostgreSQL does not support input of generic
+// records.
+type RecordCodec struct{}
+
+func (RecordCodec) FormatSupported(format int16) bool {
+ return format == BinaryFormatCode
+}
+
+func (RecordCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (RecordCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ return nil
+}
+
+func (RecordCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ if format == BinaryFormatCode {
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanBinaryRecordToCompositeIndexScanner{m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryRecordToCompositeIndexScanner struct {
+ m *Map
+}
+
+func (plan *scanPlanBinaryRecordToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeBinaryScanner(plan.m, src)
+ for i := 0; scanner.Next(); i++ {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(scanner.OID(), BinaryFormatCode, fieldTarget)
+ if fieldPlan == nil {
+ return fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), fieldTarget)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (RecordCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (RecordCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ scanner := NewCompositeBinaryScanner(m, src)
+ values := make([]any, scanner.FieldCount())
+ for i := 0; scanner.Next(); i++ {
+ var v any
+ fieldPlan := m.PlanScan(scanner.OID(), BinaryFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[i] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go
new file mode 100644
index 0000000..be1ca4a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go
@@ -0,0 +1,35 @@
+//go:build !nopgxregisterdefaulttypes
+
+package pgtype
+
+func registerDefaultPgTypeVariants[T any](m *Map, name string) {
+ arrayName := "_" + name
+
+ var value T
+ m.RegisterDefaultPgType(value, name) // T
+ m.RegisterDefaultPgType(&value, name) // *T
+
+ var sliceT []T
+ m.RegisterDefaultPgType(sliceT, arrayName) // []T
+ m.RegisterDefaultPgType(&sliceT, arrayName) // *[]T
+
+ var slicePtrT []*T
+ m.RegisterDefaultPgType(slicePtrT, arrayName) // []*T
+ m.RegisterDefaultPgType(&slicePtrT, arrayName) // *[]*T
+
+ var arrayOfT Array[T]
+ m.RegisterDefaultPgType(arrayOfT, arrayName) // Array[T]
+ m.RegisterDefaultPgType(&arrayOfT, arrayName) // *Array[T]
+
+ var arrayOfPtrT Array[*T]
+ m.RegisterDefaultPgType(arrayOfPtrT, arrayName) // Array[*T]
+ m.RegisterDefaultPgType(&arrayOfPtrT, arrayName) // *Array[*T]
+
+ var flatArrayOfT FlatArray[T]
+ m.RegisterDefaultPgType(flatArrayOfT, arrayName) // FlatArray[T]
+ m.RegisterDefaultPgType(&flatArrayOfT, arrayName) // *FlatArray[T]
+
+ var flatArrayOfPtrT FlatArray[*T]
+ m.RegisterDefaultPgType(flatArrayOfPtrT, arrayName) // FlatArray[*T]
+ m.RegisterDefaultPgType(&flatArrayOfPtrT, arrayName) // *FlatArray[*T]
+}
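
The helper above is internal, but the Map.RegisterDefaultPgType call it relies on is exported. A minimal, hedged sketch of registering a custom Go type so it defaults to the text PostgreSQL type; Label is a hypothetical wrapper used only for illustration:

```go
package main

import (
	"github.com/jackc/pgx/v5/pgtype"
)

// Label is a hypothetical wrapper type, shown only to illustrate registration.
type Label string

func main() {
	m := pgtype.NewMap()
	// With these registrations, Label values default to the "text" PostgreSQL
	// type (and slices to its array type) when no other type information is
	// available for an argument.
	m.RegisterDefaultPgType(Label(""), "text")
	m.RegisterDefaultPgType([]Label(nil), "_text")
}
```
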
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go
new file mode 100644
index 0000000..56fe7c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go
@@ -0,0 +1,6 @@
+//go:build nopgxregisterdefaulttypes
+
+package pgtype
+
+func registerDefaultPgTypeVariants[T any](m *Map, name string) {
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/text.go b/vendor/github.com/jackc/pgx/v5/pgtype/text.go
new file mode 100644
index 0000000..021ee33
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/text.go
@@ -0,0 +1,223 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+type TextScanner interface {
+ ScanText(v Text) error
+}
+
+type TextValuer interface {
+ TextValue() (Text, error)
+}
+
+type Text struct {
+ String string
+ Valid bool
+}
+
+func (t *Text) ScanText(v Text) error {
+ *t = v
+ return nil
+}
+
+func (t Text) TextValue() (Text, error) {
+ return t, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Text) Scan(src any) error {
+ if src == nil {
+ *dst = Text{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ *dst = Text{String: src, Valid: true}
+ return nil
+ case []byte:
+ *dst = Text{String: string(src), Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Text) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return src.String, nil
+}
+
+func (src Text) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ return json.Marshal(src.String)
+}
+
+func (dst *Text) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *dst = Text{}
+ } else {
+ *dst = Text{String: *s, Valid: true}
+ }
+
+ return nil
+}
+
+type TextCodec struct{}
+
+func (TextCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TextCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (TextCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodePlanTextCodecString{}
+ case []byte:
+ return encodePlanTextCodecByteSlice{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanTextCodecString struct{}
+
+func (encodePlanTextCodecString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(string)
+ buf = append(buf, s...)
+ return buf, nil
+}
+
+type encodePlanTextCodecByteSlice struct{}
+
+func (encodePlanTextCodecByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.([]byte)
+ buf = append(buf, s...)
+ return buf, nil
+}
+
+type encodePlanTextCodecStringer struct{}
+
+func (encodePlanTextCodecStringer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(fmt.Stringer)
+ buf = append(buf, s.String()...)
+ return buf, nil
+}
+
+type encodePlanTextCodecTextValuer struct{}
+
+func (encodePlanTextCodecTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ text, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !text.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, text.String...)
+ return buf, nil
+}
+
+func (TextCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanTextAnyToString{}
+ case *[]byte:
+ return scanPlanAnyToNewByteSlice{}
+ case BytesScanner:
+ return scanPlanAnyToByteScanner{}
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c TextCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c TextCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ return string(src), nil
+}
+
+type scanPlanTextAnyToString struct{}
+
+func (scanPlanTextAnyToString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = string(src)
+
+ return nil
+}
+
+type scanPlanAnyToNewByteSlice struct{}
+
+func (scanPlanAnyToNewByteSlice) Scan(src []byte, dst any) error {
+ p := (dst).(*[]byte)
+ if src == nil {
+ *p = nil
+ } else {
+ *p = make([]byte, len(src))
+ copy(*p, src)
+ }
+
+ return nil
+}
+
+type scanPlanAnyToByteScanner struct{}
+
+func (scanPlanAnyToByteScanner) Scan(src []byte, dst any) error {
+ p := (dst).(BytesScanner)
+ return p.ScanBytes(src)
+}
+
+type scanPlanTextAnyToTextScanner struct{}
+
+func (scanPlanTextAnyToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: string(src), Valid: true})
+}
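
pgtype.Text is the nullable counterpart of a plain Go string; Valid == false represents SQL NULL. A small database-free sketch of the Scanner/Valuer and JSON behavior:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var t pgtype.Text

	// SQL NULL leaves Valid == false; Value then reports nil.
	_ = t.Scan(nil)
	v, _ := t.Value()
	fmt.Println(t.Valid, v) // false <nil>

	// A non-NULL value round-trips through JSON as a plain string.
	_ = t.Scan("hello")
	b, _ := t.MarshalJSON()
	fmt.Println(t.Valid, string(b)) // true "hello"
}
```
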
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go
new file mode 100644
index 0000000..d5e4cdb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go
@@ -0,0 +1,13 @@
+package pgtype
+
+type TextFormatOnlyCodec struct {
+ Codec
+}
+
+func (c *TextFormatOnlyCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode && c.Codec.FormatSupported(format)
+}
+
+func (TextFormatOnlyCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/tid.go b/vendor/github.com/jackc/pgx/v5/pgtype/tid.go
new file mode 100644
index 0000000..9bc2c2a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/tid.go
@@ -0,0 +1,241 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type TIDScanner interface {
+ ScanTID(v TID) error
+}
+
+type TIDValuer interface {
+ TIDValue() (TID, error)
+}
+
+// TID is PostgreSQL's Tuple Identifier type.
+//
+// When one does
+//
+// select ctid, * from some_table;
+//
+// it is the data type of the ctid hidden system column.
+//
+// It is currently implemented as a pair of unsigned two-byte integers.
+// Its conversion functions can be found in src/backend/utils/adt/tid.c
+// in the PostgreSQL sources.
+type TID struct {
+ BlockNumber uint32
+ OffsetNumber uint16
+ Valid bool
+}
+
+func (b *TID) ScanTID(v TID) error {
+ *b = v
+ return nil
+}
+
+func (b TID) TIDValue() (TID, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TID) Scan(src any) error {
+ if src == nil {
+ *dst = TID{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToTIDScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src TID) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := TIDCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type TIDCodec struct{}
+
+func (TIDCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TIDCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (TIDCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TIDValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTIDCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTIDCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTIDCodecBinary struct{}
+
+func (encodePlanTIDCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ tid, err := value.(TIDValuer).TIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !tid.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint32(buf, tid.BlockNumber)
+ buf = pgio.AppendUint16(buf, tid.OffsetNumber)
+ return buf, nil
+}
+
+type encodePlanTIDCodecText struct{}
+
+func (encodePlanTIDCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ tid, err := value.(TIDValuer).TIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !tid.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%d,%d)`, tid.BlockNumber, tid.OffsetNumber)...)
+ return buf, nil
+}
+
+func (TIDCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TIDScanner:
+ return scanPlanBinaryTIDToTIDScanner{}
+ case TextScanner:
+ return scanPlanBinaryTIDToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TIDScanner:
+ return scanPlanTextAnyToTIDScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTIDToTIDScanner struct{}
+
+func (scanPlanBinaryTIDToTIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TIDScanner)
+
+ if src == nil {
+ return scanner.ScanTID(TID{})
+ }
+
+ if len(src) != 6 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ return scanner.ScanTID(TID{
+ BlockNumber: binary.BigEndian.Uint32(src),
+ OffsetNumber: binary.BigEndian.Uint16(src[4:]),
+ Valid: true,
+ })
+}
+
+type scanPlanBinaryTIDToTextScanner struct{}
+
+func (scanPlanBinaryTIDToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ if len(src) != 6 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ blockNumber := binary.BigEndian.Uint32(src)
+ offsetNumber := binary.BigEndian.Uint16(src[4:])
+
+ return scanner.ScanText(Text{
+ String: fmt.Sprintf(`(%d,%d)`, blockNumber, offsetNumber),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToTIDScanner struct{}
+
+func (scanPlanTextAnyToTIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TIDScanner)
+
+ if src == nil {
+ return scanner.ScanTID(TID{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ block, offset, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return fmt.Errorf("invalid format for tid")
+ }
+
+ blockNumber, err := strconv.ParseUint(block, 10, 32)
+ if err != nil {
+ return err
+ }
+
+ offsetNumber, err := strconv.ParseUint(offset, 10, 16)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanTID(TID{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber), Valid: true})
+}
+
+func (c TIDCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c TIDCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tid TID
+ err := codecScan(c, m, oid, format, src, &tid)
+ if err != nil {
+ return nil, err
+ }
+ return tid, nil
+}
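
TID values normally come from selecting the hidden ctid column, but the text form can also be parsed directly. A minimal sketch without a database:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var tid pgtype.TID
	// Scan parses PostgreSQL's text representation "(blockNumber,offsetNumber)".
	if err := tid.Scan("(42,7)"); err != nil {
		panic(err)
	}
	fmt.Println(tid.BlockNumber, tid.OffsetNumber, tid.Valid) // 42 7 true

	v, _ := tid.Value()
	fmt.Println(v) // (42,7)
}
```
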
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/time.go b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
new file mode 100644
index 0000000..f8fd948
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
@@ -0,0 +1,274 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type TimeScanner interface {
+ ScanTime(v Time) error
+}
+
+type TimeValuer interface {
+ TimeValue() (Time, error)
+}
+
+// Time represents the PostgreSQL time type. The PostgreSQL time is a time of day without time zone.
+//
+// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time and
+// date types in pgtype can use time.Time as the underlying representation. However, the pgtype.Time type cannot, because
+// it must be able to represent 24:00:00, which time.Time converts to 00:00:00 on the following day.
+//
+// The time with time zone type is not supported. Use of time with time zone is discouraged by the PostgreSQL documentation.
+type Time struct {
+ Microseconds int64 // Number of microseconds since midnight
+ Valid bool
+}
+
+func (t *Time) ScanTime(v Time) error {
+ *t = v
+ return nil
+}
+
+func (t Time) TimeValue() (Time, error) {
+ return t, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (t *Time) Scan(src any) error {
+ if src == nil {
+ *t = Time{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ err := scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t)
+ if err != nil {
+ t.Microseconds = 0
+ t.Valid = false
+ }
+ return err
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (t Time) Value() (driver.Value, error) {
+ if !t.Valid {
+ return nil, nil
+ }
+
+ buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, t).Encode(t, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type TimeCodec struct{}
+
+func (TimeCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TimeCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (TimeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimeValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimeCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimeCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimeCodecBinary struct{}
+
+func (encodePlanTimeCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TimeValuer).TimeValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !t.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendInt64(buf, t.Microseconds), nil
+}
+
+type encodePlanTimeCodecText struct{}
+
+func (encodePlanTimeCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TimeValuer).TimeValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !t.Valid {
+ return nil, nil
+ }
+
+ usec := t.Microseconds
+ hours := usec / microsecondsPerHour
+ usec -= hours * microsecondsPerHour
+ minutes := usec / microsecondsPerMinute
+ usec -= minutes * microsecondsPerMinute
+ seconds := usec / microsecondsPerSecond
+ usec -= seconds * microsecondsPerSecond
+
+ s := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, usec)
+
+ return append(buf, s...), nil
+}
+
+func (TimeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimeScanner:
+ return scanPlanBinaryTimeToTimeScanner{}
+ case TextScanner:
+ return scanPlanBinaryTimeToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimeScanner:
+ return scanPlanTextAnyToTimeScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimeToTimeScanner struct{}
+
+func (scanPlanBinaryTimeToTimeScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimeScanner)
+
+ if src == nil {
+ return scanner.ScanTime(Time{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for time: %v", len(src))
+ }
+
+ usec := int64(binary.BigEndian.Uint64(src))
+
+ return scanner.ScanTime(Time{Microseconds: usec, Valid: true})
+}
+
+type scanPlanBinaryTimeToTextScanner struct{}
+
+func (scanPlanBinaryTimeToTextScanner) Scan(src []byte, dst any) error {
+ ts, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return ts.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for time: %v", len(src))
+ }
+
+ usec := int64(binary.BigEndian.Uint64(src))
+
+ tim := Time{Microseconds: usec, Valid: true}
+
+ buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, tim).Encode(tim, nil)
+ if err != nil {
+ return err
+ }
+
+ return ts.ScanText(Text{String: string(buf), Valid: true})
+}
+
+type scanPlanTextAnyToTimeScanner struct{}
+
+func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimeScanner)
+
+ if src == nil {
+ return scanner.ScanTime(Time{})
+ }
+
+ s := string(src)
+
+ if len(s) < 8 || s[2] != ':' || s[5] != ':' {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ hours, err := strconv.ParseInt(s[0:2], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec := hours * microsecondsPerHour
+
+ minutes, err := strconv.ParseInt(s[3:5], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec += minutes * microsecondsPerMinute
+
+ seconds, err := strconv.ParseInt(s[6:8], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec += seconds * microsecondsPerSecond
+
+ if len(s) > 9 {
+ if s[8] != '.' || len(s) > 15 {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ fraction := s[9:]
+ n, err := strconv.ParseInt(fraction, 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ for i := len(fraction); i < 6; i++ {
+ n *= 10
+ }
+
+ usec += n
+ }
+
+ return scanner.ScanTime(Time{Microseconds: usec, Valid: true})
+}
+
+func (c TimeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c TimeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var t Time
+ err := codecScan(c, m, oid, format, src, &t)
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
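
Because pgtype.Time stores microseconds since midnight rather than a time.Time, it can represent 24:00:00, which time.Time cannot. A short database-free sketch of the text scan path:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var t pgtype.Time
	if err := t.Scan("24:00:00"); err != nil {
		panic(err)
	}
	fmt.Println(t.Microseconds) // 86400000000, i.e. a full day

	// The microsecond count converts naturally to a time.Duration.
	fmt.Println(time.Duration(t.Microseconds) * time.Microsecond) // 24h0m0s
}
```
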
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go
new file mode 100644
index 0000000..c31f2ac
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go
@@ -0,0 +1,364 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const pgTimestampFormat = "2006-01-02 15:04:05.999999999"
+const jsonISO8601 = "2006-01-02T15:04:05.999999999"
+
+type TimestampScanner interface {
+ ScanTimestamp(v Timestamp) error
+}
+
+type TimestampValuer interface {
+ TimestampValue() (Timestamp, error)
+}
+
+// Timestamp represents the PostgreSQL timestamp type.
+type Timestamp struct {
+ Time time.Time // Time zone will be ignored when encoding to PostgreSQL.
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (ts *Timestamp) ScanTimestamp(v Timestamp) error {
+ *ts = v
+ return nil
+}
+
+func (ts Timestamp) TimestampValue() (Timestamp, error) {
+ return ts, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (ts *Timestamp) Scan(src any) error {
+ if src == nil {
+ *ts = Timestamp{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return (&scanPlanTextTimestampToTimestampScanner{}).Scan([]byte(src), ts)
+ case time.Time:
+ *ts = Timestamp{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (ts Timestamp) Value() (driver.Value, error) {
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier.String(), nil
+ }
+ return ts.Time, nil
+}
+
+func (ts Timestamp) MarshalJSON() ([]byte, error) {
+ if !ts.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+ s = ts.Time.Format(jsonISO8601)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (ts *Timestamp) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *ts = Timestamp{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *ts = Timestamp{Valid: true, InfinityModifier: -Infinity}
+ default:
+ // Parse time with or without a time zone.
+ tss := *s
+ // PostgreSQL uses ISO 8601 without a time zone for the to_json function and when casting from a string to timestamp.
+ tim, err := time.Parse(time.RFC3339Nano, tss)
+ if err == nil {
+ *ts = Timestamp{Time: tim, Valid: true}
+ return nil
+ }
+ tim, err = time.ParseInLocation(jsonISO8601, tss, time.UTC)
+ if err == nil {
+ *ts = Timestamp{Time: tim, Valid: true}
+ return nil
+ }
+ ts.Valid = false
+ return fmt.Errorf("cannot unmarshal %s to timestamp with layout %s or %s (%w)",
+ *s, time.RFC3339Nano, jsonISO8601, err)
+ }
+ return nil
+}
+
+type TimestampCodec struct {
+ // ScanLocation is the location that the time is assumed to be in for scanning. This is different from
+ // TimestamptzCodec.ScanLocation in that this setting does change the instant in time that the timestamp represents.
+ ScanLocation *time.Location
+}
+
+func (*TimestampCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*TimestampCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (*TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimestampValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimestampCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimestampCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimestampCodecBinary struct{}
+
+func (encodePlanTimestampCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestampValuer).TimestampValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var microsecSinceY2K int64
+ switch ts.InfinityModifier {
+ case Finite:
+ t := discardTimeZone(ts.Time)
+ microsecSinceUnixEpoch := t.Unix()*1000000 + int64(t.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ buf = pgio.AppendInt64(buf, microsecSinceY2K)
+
+ return buf, nil
+}
+
+type encodePlanTimestampCodecText struct{}
+
+func (encodePlanTimestampCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestampValuer).TimestampValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+ t := discardTimeZone(ts.Time)
+
+ // Year 0000 is 1 BC
+ bc := false
+ if year := t.Year(); year <= 0 {
+ year = -year + 1
+ t = time.Date(year, t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ bc = true
+ }
+
+ s = t.Truncate(time.Microsecond).Format(pgTimestampFormat)
+
+ if bc {
+ s = s + " BC"
+ }
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ buf = append(buf, s...)
+
+ return buf, nil
+}
+
+func discardTimeZone(t time.Time) time.Time {
+ if t.Location() != time.UTC {
+ return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ }
+
+ return t
+}
+
+func (c *TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimestampScanner:
+ return &scanPlanBinaryTimestampToTimestampScanner{location: c.ScanLocation}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimestampScanner:
+ return &scanPlanTextTimestampToTimestampScanner{location: c.ScanLocation}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimestampToTimestampScanner struct{ location *time.Location }
+
+func (plan *scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestampScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamp(Timestamp{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for timestamp: %v", len(src))
+ }
+
+ var ts Timestamp
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ ts = Timestamp{Valid: true, InfinityModifier: -Infinity}
+ default:
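+		// Shift from PostgreSQL's epoch (2000-01-01) to the Unix epoch and split the
+		// microsecond count into the seconds and nanoseconds arguments expected by time.Unix.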
+ tim := time.Unix(
+ microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
+ (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
+ ).UTC()
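+		// Rebuilding the same wall-clock fields in ScanLocation intentionally changes the
+		// absolute instant, since a timestamp without time zone carries no location of its own.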
+ if plan.location != nil {
+ tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
+ }
+ ts = Timestamp{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamp(ts)
+}
+
+type scanPlanTextTimestampToTimestampScanner struct{ location *time.Location }
+
+func (plan *scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestampScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamp(Timestamp{})
+ }
+
+ var ts Timestamp
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ ts = Timestamp{Valid: true, InfinityModifier: -Infinity}
+ default:
+ bc := false
+ if strings.HasSuffix(sbuf, " BC") {
+ sbuf = sbuf[:len(sbuf)-3]
+ bc = true
+ }
+ tim, err := time.Parse(pgTimestampFormat, sbuf)
+ if err != nil {
+ return err
+ }
+
+ if bc {
+ year := -tim.Year() + 1
+ tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
+ }
+
+ if plan.location != nil {
+ tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
+ }
+
+ ts = Timestamp{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamp(ts)
+}
+
+func (c *TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var ts Timestamp
+ err := codecScan(c, m, oid, format, src, &ts)
+ if err != nil {
+ return nil, err
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier.String(), nil
+ }
+
+ return ts.Time, nil
+}
+
+func (c *TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var ts Timestamp
+ err := codecScan(c, m, oid, format, src, &ts)
+ if err != nil {
+ return nil, err
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier, nil
+ }
+
+ return ts.Time, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go
new file mode 100644
index 0000000..7efbcff
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go
@@ -0,0 +1,366 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const pgTimestamptzHourFormat = "2006-01-02 15:04:05.999999999Z07"
+const pgTimestamptzMinuteFormat = "2006-01-02 15:04:05.999999999Z07:00"
+const pgTimestamptzSecondFormat = "2006-01-02 15:04:05.999999999Z07:00:00"
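+// 946684800 is the Unix time of 2000-01-01 00:00:00 UTC; PostgreSQL's binary timestamp
+// formats count microseconds from that epoch.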
+const microsecFromUnixEpochToY2K = 946684800 * 1000000
+
+const (
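+	// PostgreSQL transmits -infinity and infinity as the minimum and maximum int64
+	// microsecond offsets.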
+ negativeInfinityMicrosecondOffset = -9223372036854775808
+ infinityMicrosecondOffset = 9223372036854775807
+)
+
+type TimestamptzScanner interface {
+ ScanTimestamptz(v Timestamptz) error
+}
+
+type TimestamptzValuer interface {
+ TimestamptzValue() (Timestamptz, error)
+}
+
+// Timestamptz represents the PostgreSQL timestamptz type.
+type Timestamptz struct {
+ Time time.Time
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (tstz *Timestamptz) ScanTimestamptz(v Timestamptz) error {
+ *tstz = v
+ return nil
+}
+
+func (tstz Timestamptz) TimestamptzValue() (Timestamptz, error) {
+ return tstz, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (tstz *Timestamptz) Scan(src any) error {
+ if src == nil {
+ *tstz = Timestamptz{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return (&scanPlanTextTimestamptzToTimestamptzScanner{}).Scan([]byte(src), tstz)
+ case time.Time:
+ *tstz = Timestamptz{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (tstz Timestamptz) Value() (driver.Value, error) {
+ if !tstz.Valid {
+ return nil, nil
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier.String(), nil
+ }
+ return tstz.Time, nil
+}
+
+func (tstz Timestamptz) MarshalJSON() ([]byte, error) {
+ if !tstz.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch tstz.InfinityModifier {
+ case Finite:
+ s = tstz.Time.Format(time.RFC3339Nano)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (tstz *Timestamptz) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *tstz = Timestamptz{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *tstz = Timestamptz{Valid: true, InfinityModifier: -Infinity}
+ default:
+ // PostgreSQL uses ISO 8601 for to_json function and casting from a string to timestamptz
+ tim, err := time.Parse(time.RFC3339Nano, *s)
+ if err != nil {
+ return err
+ }
+
+ *tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return nil
+}
+
+type TimestamptzCodec struct {
+ // ScanLocation is the location to return scanned timestamptz values in. This does not change the instant in time that
+ // the timestamptz represents.
+ ScanLocation *time.Location
+}
+
+func (*TimestamptzCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*TimestamptzCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (*TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimestamptzValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimestamptzCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimestamptzCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimestamptzCodecBinary struct{}
+
+func (encodePlanTimestamptzCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestamptzValuer).TimestamptzValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var microsecSinceY2K int64
+ switch ts.InfinityModifier {
+ case Finite:
+ microsecSinceUnixEpoch := ts.Time.Unix()*1000000 + int64(ts.Time.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ buf = pgio.AppendInt64(buf, microsecSinceY2K)
+
+ return buf, nil
+}
+
+type encodePlanTimestamptzCodecText struct{}
+
+func (encodePlanTimestamptzCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestamptzValuer).TimestamptzValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+
+ t := ts.Time.UTC().Truncate(time.Microsecond)
+
+ // Year 0000 is 1 BC
+ bc := false
+ if year := t.Year(); year <= 0 {
+ year = -year + 1
+ t = time.Date(year, t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ bc = true
+ }
+
+ s = t.Format(pgTimestamptzSecondFormat)
+
+ if bc {
+ s = s + " BC"
+ }
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ buf = append(buf, s...)
+
+ return buf, nil
+}
+
+func (c *TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimestamptzScanner:
+ return &scanPlanBinaryTimestamptzToTimestamptzScanner{location: c.ScanLocation}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimestamptzScanner:
+ return &scanPlanTextTimestamptzToTimestamptzScanner{location: c.ScanLocation}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimestamptzToTimestamptzScanner struct{ location *time.Location }
+
+func (plan *scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestamptzScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamptz(Timestamptz{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for timestamptz: %v", len(src))
+ }
+
+ var tstz Timestamptz
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ tstz = Timestamptz{Valid: true, InfinityModifier: -Infinity}
+ default:
+ tim := time.Unix(
+ microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
+ (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
+ )
+ if plan.location != nil {
+ tim = tim.In(plan.location)
+ }
+ tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamptz(tstz)
+}
+
+type scanPlanTextTimestamptzToTimestamptzScanner struct{ location *time.Location }
+
+func (plan *scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestamptzScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamptz(Timestamptz{})
+ }
+
+ var tstz Timestamptz
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ tstz = Timestamptz{Valid: true, InfinityModifier: -Infinity}
+ default:
+ bc := false
+ if strings.HasSuffix(sbuf, " BC") {
+ sbuf = sbuf[:len(sbuf)-3]
+ bc = true
+ }
+
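+		// The text format may carry a zone offset written as +HH, +HH:MM, or +HH:MM:SS;
+		// pick the layout by where the sign character sits relative to the end of the string.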
+ var format string
+ if len(sbuf) >= 9 && (sbuf[len(sbuf)-9] == '-' || sbuf[len(sbuf)-9] == '+') {
+ format = pgTimestamptzSecondFormat
+ } else if len(sbuf) >= 6 && (sbuf[len(sbuf)-6] == '-' || sbuf[len(sbuf)-6] == '+') {
+ format = pgTimestamptzMinuteFormat
+ } else {
+ format = pgTimestamptzHourFormat
+ }
+
+ tim, err := time.Parse(format, sbuf)
+ if err != nil {
+ return err
+ }
+
+ if bc {
+ year := -tim.Year() + 1
+ tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
+ }
+
+ if plan.location != nil {
+ tim = tim.In(plan.location)
+ }
+
+ tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamptz(tstz)
+}
+
+func (c *TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tstz Timestamptz
+ err := codecScan(c, m, oid, format, src, &tstz)
+ if err != nil {
+ return nil, err
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier.String(), nil
+ }
+
+ return tstz.Time, nil
+}
+
+func (c *TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tstz Timestamptz
+ err := codecScan(c, m, oid, format, src, &tstz)
+ if err != nil {
+ return nil, err
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier, nil
+ }
+
+ return tstz.Time, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
new file mode 100644
index 0000000..f2b2fa6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
@@ -0,0 +1,325 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Uint32Scanner interface {
+ ScanUint32(v Uint32) error
+}
+
+type Uint32Valuer interface {
+ Uint32Value() (Uint32, error)
+}
+
+// Uint32 is the core type that is used to represent PostgreSQL types such as OID, CID, and XID.
+type Uint32 struct {
+ Uint32 uint32
+ Valid bool
+}
+
+func (n *Uint32) ScanUint32(v Uint32) error {
+ *n = v
+ return nil
+}
+
+func (n Uint32) Uint32Value() (Uint32, error) {
+ return n, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Uint32) Scan(src any) error {
+ if src == nil {
+ *dst = Uint32{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ un, err := strconv.ParseUint(src, 10, 32)
+ if err != nil {
+ return err
+ }
+ n = int64(un)
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < 0 {
+ return fmt.Errorf("%d is less than the minimum value for Uint32", n)
+ }
+ if n > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than maximum value for Uint32", n)
+ }
+
+ *dst = Uint32{Uint32: uint32(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Uint32) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Uint32), nil
+}
+
+type Uint32Codec struct{}
+
+func (Uint32Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Uint32Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Uint32Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case uint32:
+ return encodePlanUint32CodecBinaryUint32{}
+ case Uint32Valuer:
+ return encodePlanUint32CodecBinaryUint32Valuer{}
+ case Int64Valuer:
+ return encodePlanUint32CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case uint32:
+ return encodePlanUint32CodecTextUint32{}
+ case Int64Valuer:
+ return encodePlanUint32CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanUint32CodecBinaryUint32 struct{}
+
+func (encodePlanUint32CodecBinaryUint32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint32)
+ return pgio.AppendUint32(buf, v), nil
+}
+
+type encodePlanUint32CodecBinaryUint32Valuer struct{}
+
+func (encodePlanUint32CodecBinaryUint32Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint32Valuer).Uint32Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint32(buf, v.Uint32), nil
+}
+
+type encodePlanUint32CodecBinaryInt64Valuer struct{}
+
+func (encodePlanUint32CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for uint32", v.Int64)
+ }
+
+ return pgio.AppendUint32(buf, uint32(v.Int64)), nil
+}
+
+type encodePlanUint32CodecTextUint32 struct{}
+
+func (encodePlanUint32CodecTextUint32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint32)
+ return append(buf, strconv.FormatUint(uint64(v), 10)...), nil
+}
+
+type encodePlanUint32CodecTextUint32Valuer struct{}
+
+func (encodePlanUint32CodecTextUint32Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint32Valuer).Uint32Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatUint(uint64(v.Uint32), 10)...), nil
+}
+
+type encodePlanUint32CodecTextInt64Valuer struct{}
+
+func (encodePlanUint32CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for uint32", v.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(v.Int64, 10)...), nil
+}
+
+func (Uint32Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *uint32:
+ return scanPlanBinaryUint32ToUint32{}
+ case Uint32Scanner:
+ return scanPlanBinaryUint32ToUint32Scanner{}
+ case TextScanner:
+ return scanPlanBinaryUint32ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case Uint32Scanner:
+ return scanPlanTextAnyToUint32Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Uint32Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return int64(n), nil
+}
+
+func (c Uint32Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryUint32ToUint32 struct{}
+
+func (scanPlanBinaryUint32ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ p := (dst).(*uint32)
+ *p = binary.BigEndian.Uint32(src)
+
+ return nil
+}
+
+type scanPlanBinaryUint32ToUint32Scanner struct{}
+
+func (scanPlanBinaryUint32ToUint32Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint32Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint32(Uint32{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint32(src)
+
+ return s.ScanUint32(Uint32{Uint32: n, Valid: true})
+}
+
+type scanPlanBinaryUint32ToTextScanner struct{}
+
+func (scanPlanBinaryUint32ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ n := uint64(binary.BigEndian.Uint32(src))
+ return s.ScanText(Text{String: strconv.FormatUint(n, 10), Valid: true})
+}
+
+type scanPlanTextAnyToUint32Scanner struct{}
+
+func (scanPlanTextAnyToUint32Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint32Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint32(Uint32{})
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanUint32(Uint32{Uint32: uint32(n), Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uint64.go b/vendor/github.com/jackc/pgx/v5/pgtype/uint64.go
new file mode 100644
index 0000000..dd2130e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uint64.go
@@ -0,0 +1,322 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Uint64Scanner interface {
+ ScanUint64(v Uint64) error
+}
+
+type Uint64Valuer interface {
+ Uint64Value() (Uint64, error)
+}
+
+// Uint64 is the core type that is used to represent PostgreSQL types such as XID8.
+type Uint64 struct {
+ Uint64 uint64
+ Valid bool
+}
+
+func (n *Uint64) ScanUint64(v Uint64) error {
+ *n = v
+ return nil
+}
+
+func (n Uint64) Uint64Value() (Uint64, error) {
+ return n, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Uint64) Scan(src any) error {
+ if src == nil {
+ *dst = Uint64{}
+ return nil
+ }
+
+ var n uint64
+
+ switch src := src.(type) {
+ case int64:
+ if src < 0 {
+ return fmt.Errorf("%d is less than the minimum value for Uint64", src)
+ }
+ n = uint64(src)
+ case string:
+ un, err := strconv.ParseUint(src, 10, 64)
+ if err != nil {
+ return err
+ }
+ n = un
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ *dst = Uint64{Uint64: n, Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Uint64) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ // If the value is greater than the maximum value for int64, return it as a string instead of losing data or returning
+ // an error.
+ if src.Uint64 > math.MaxInt64 {
+ return strconv.FormatUint(src.Uint64, 10), nil
+ }
+
+ return int64(src.Uint64), nil
+}
+
+type Uint64Codec struct{}
+
+func (Uint64Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Uint64Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Uint64Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case uint64:
+ return encodePlanUint64CodecBinaryUint64{}
+ case Uint64Valuer:
+ return encodePlanUint64CodecBinaryUint64Valuer{}
+ case Int64Valuer:
+ return encodePlanUint64CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case uint64:
+ return encodePlanUint64CodecTextUint64{}
+ case Int64Valuer:
+ return encodePlanUint64CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanUint64CodecBinaryUint64 struct{}
+
+func (encodePlanUint64CodecBinaryUint64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint64)
+ return pgio.AppendUint64(buf, v), nil
+}
+
+type encodePlanUint64CodecBinaryUint64Valuer struct{}
+
+func (encodePlanUint64CodecBinaryUint64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint64Valuer).Uint64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint64(buf, v.Uint64), nil
+}
+
+type encodePlanUint64CodecBinaryInt64Valuer struct{}
+
+func (encodePlanUint64CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than minimum value for uint64", v.Int64)
+ }
+
+ return pgio.AppendUint64(buf, uint64(v.Int64)), nil
+}
+
+type encodePlanUint64CodecTextUint64 struct{}
+
+func (encodePlanUint64CodecTextUint64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint64)
+ return append(buf, strconv.FormatUint(uint64(v), 10)...), nil
+}
+
+type encodePlanUint64CodecTextUint64Valuer struct{}
+
+func (encodePlanUint64CodecTextUint64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint64Valuer).Uint64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatUint(v.Uint64, 10)...), nil
+}
+
+type encodePlanUint64CodecTextInt64Valuer struct{}
+
+func (encodePlanUint64CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than minimum value for uint64", v.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(v.Int64, 10)...), nil
+}
+
+func (Uint64Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *uint64:
+ return scanPlanBinaryUint64ToUint64{}
+ case Uint64Scanner:
+ return scanPlanBinaryUint64ToUint64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryUint64ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case Uint64Scanner:
+ return scanPlanTextAnyToUint64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Uint64Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return int64(n), nil
+}
+
+func (c Uint64Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryUint64ToUint64 struct{}
+
+func (scanPlanBinaryUint64ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint64: %v", len(src))
+ }
+
+ p := (dst).(*uint64)
+ *p = binary.BigEndian.Uint64(src)
+
+ return nil
+}
+
+type scanPlanBinaryUint64ToUint64Scanner struct{}
+
+func (scanPlanBinaryUint64ToUint64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint64(Uint64{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint64: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint64(src)
+
+ return s.ScanUint64(Uint64{Uint64: n, Valid: true})
+}
+
+type scanPlanBinaryUint64ToTextScanner struct{}
+
+func (scanPlanBinaryUint64ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint64: %v", len(src))
+ }
+
+ n := uint64(binary.BigEndian.Uint64(src))
+ return s.ScanText(Text{String: strconv.FormatUint(n, 10), Valid: true})
+}
+
+type scanPlanTextAnyToUint64Scanner struct{}
+
+func (scanPlanTextAnyToUint64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint64(Uint64{})
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanUint64(Uint64{Uint64: n, Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
new file mode 100644
index 0000000..0628f19
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
@@ -0,0 +1,289 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+)
+
+type UUIDScanner interface {
+ ScanUUID(v UUID) error
+}
+
+type UUIDValuer interface {
+ UUIDValue() (UUID, error)
+}
+
+type UUID struct {
+ Bytes [16]byte
+ Valid bool
+}
+
+func (b *UUID) ScanUUID(v UUID) error {
+ *b = v
+ return nil
+}
+
+func (b UUID) UUIDValue() (UUID, error) {
+ return b, nil
+}
+
+// parseUUID converts a string UUID in standard form to a byte array.
+func parseUUID(src string) (dst [16]byte, err error) {
+ switch len(src) {
+ case 36:
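+		// Strip the dashes from the canonical 8-4-4-4-12 form, leaving 32 hex characters.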
+ src = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]
+ case 32:
+ // dashes already stripped, assume valid
+ default:
+ // assume invalid.
+ return dst, fmt.Errorf("cannot parse UUID %v", src)
+ }
+
+ buf, err := hex.DecodeString(src)
+ if err != nil {
+ return dst, err
+ }
+
+ copy(dst[:], buf)
+ return dst, err
+}
+
+// encodeUUID converts a uuid byte array to UUID standard string form.
+func encodeUUID(src [16]byte) string {
+ var buf [36]byte
+
+ hex.Encode(buf[0:8], src[:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], src[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], src[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], src[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], src[10:])
+
+ return string(buf[:])
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *UUID) Scan(src any) error {
+ if src == nil {
+ *dst = UUID{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ buf, err := parseUUID(src)
+ if err != nil {
+ return err
+ }
+ *dst = UUID{Bytes: buf, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src UUID) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ return encodeUUID(src.Bytes), nil
+}
+
+func (src UUID) String() string {
+ if !src.Valid {
+ return ""
+ }
+
+ return encodeUUID(src.Bytes)
+}
+
+func (src UUID) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var buff bytes.Buffer
+ buff.WriteByte('"')
+ buff.WriteString(encodeUUID(src.Bytes))
+ buff.WriteByte('"')
+ return buff.Bytes(), nil
+}
+
+func (dst *UUID) UnmarshalJSON(src []byte) error {
+ if bytes.Equal(src, []byte("null")) {
+ *dst = UUID{}
+ return nil
+ }
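+	// A JSON-encoded UUID in canonical form is 36 characters plus the surrounding quotes.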
+ if len(src) != 38 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+ buf, err := parseUUID(string(src[1 : len(src)-1]))
+ if err != nil {
+ return err
+ }
+ *dst = UUID{Bytes: buf, Valid: true}
+ return nil
+}
+
+type UUIDCodec struct{}
+
+func (UUIDCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (UUIDCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (UUIDCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(UUIDValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanUUIDCodecBinaryUUIDValuer{}
+ case TextFormatCode:
+ return encodePlanUUIDCodecTextUUIDValuer{}
+ }
+
+ return nil
+}
+
+type encodePlanUUIDCodecBinaryUUIDValuer struct{}
+
+func (encodePlanUUIDCodecBinaryUUIDValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ uuid, err := value.(UUIDValuer).UUIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !uuid.Valid {
+ return nil, nil
+ }
+
+ return append(buf, uuid.Bytes[:]...), nil
+}
+
+type encodePlanUUIDCodecTextUUIDValuer struct{}
+
+func (encodePlanUUIDCodecTextUUIDValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ uuid, err := value.(UUIDValuer).UUIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !uuid.Valid {
+ return nil, nil
+ }
+
+ return append(buf, encodeUUID(uuid.Bytes)...), nil
+}
+
+func (UUIDCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case UUIDScanner:
+ return scanPlanBinaryUUIDToUUIDScanner{}
+ case TextScanner:
+ return scanPlanBinaryUUIDToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case UUIDScanner:
+ return scanPlanTextAnyToUUIDScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryUUIDToUUIDScanner struct{}
+
+func (scanPlanBinaryUUIDToUUIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(UUIDScanner)
+
+ if src == nil {
+ return scanner.ScanUUID(UUID{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ uuid := UUID{Valid: true}
+ copy(uuid.Bytes[:], src)
+
+ return scanner.ScanUUID(uuid)
+}
+
+type scanPlanBinaryUUIDToTextScanner struct{}
+
+func (scanPlanBinaryUUIDToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ var buf [16]byte
+ copy(buf[:], src)
+
+ return scanner.ScanText(Text{String: encodeUUID(buf), Valid: true})
+}
+
+type scanPlanTextAnyToUUIDScanner struct{}
+
+func (scanPlanTextAnyToUUIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(UUIDScanner)
+
+ if src == nil {
+ return scanner.ScanUUID(UUID{})
+ }
+
+ buf, err := parseUUID(string(src))
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanUUID(UUID{Bytes: buf, Valid: true})
+}
+
+func (c UUIDCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var uuid UUID
+ err := codecScan(c, m, oid, format, src, &uuid)
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeUUID(uuid.Bytes), nil
+}
+
+func (c UUIDCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var uuid UUID
+ err := codecScan(c, m, oid, format, src, &uuid)
+ if err != nil {
+ return nil, err
+ }
+ return uuid.Bytes, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/xml.go b/vendor/github.com/jackc/pgx/v5/pgtype/xml.go
new file mode 100644
index 0000000..79e3698
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/xml.go
@@ -0,0 +1,198 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+)
+
+type XMLCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*XMLCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*XMLCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *XMLCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch value.(type) {
+ case string:
+ return encodePlanXMLCodecEitherFormatString{}
+ case []byte:
+ return encodePlanXMLCodecEitherFormatByteSlice{}
+
+ // Cannot rely on driver.Valuer being handled later because anything can be marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1430
+ //
+ // Check for driver.Valuer must come before xml.Marshaler so that it is guaranteed to be used
+ // when both are implemented https://github.com/jackc/pgx/issues/1805
+ case driver.Valuer:
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+
+ // Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
+ // marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1681
+ case xml.Marshaler:
+ return &encodePlanXMLCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+ }
+
+	// Because anything can be marshalled, the normal wrapping in Map.PlanEncode doesn't get a chance to run. So try the
+ // appropriate wrappers here.
+ for _, f := range []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ } {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ return &encodePlanXMLCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+}
+
+type encodePlanXMLCodecEitherFormatString struct{}
+
+func (encodePlanXMLCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlString := value.(string)
+ buf = append(buf, xmlString...)
+ return buf, nil
+}
+
+type encodePlanXMLCodecEitherFormatByteSlice struct{}
+
+func (encodePlanXMLCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlBytes := value.([]byte)
+ if xmlBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, xmlBytes...)
+ return buf, nil
+}
+
+type encodePlanXMLCodecEitherFormatMarshal struct {
+ marshal func(v any) ([]byte, error)
+}
+
+func (e *encodePlanXMLCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlBytes, err := e.marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ buf = append(buf, xmlBytes...)
+ return buf, nil
+}
+
+func (c *XMLCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch target.(type) {
+ case *string:
+ return scanPlanAnyToString{}
+
+ case **string:
+ // This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
+ // solution would be.
+ //
+ // https://github.com/jackc/pgx/issues/1470 -- **string
+ // https://github.com/jackc/pgx/issues/1691 -- ** anything else
+
+ if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
+ if nextPlan := m.planScan(oid, format, nextDst, 0); nextPlan != nil {
+ if _, failed := nextPlan.(*scanPlanFail); !failed {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ case *[]byte:
+ return scanPlanXMLToByteSlice{}
+ case BytesScanner:
+ return scanPlanBinaryBytesToBytesScanner{}
+
+ // Cannot rely on sql.Scanner being handled later because scanPlanXMLToXMLUnmarshal will take precedence.
+ //
+ // https://github.com/jackc/pgx/issues/1418
+ case sql.Scanner:
+ return &scanPlanSQLScanner{formatCode: format}
+ }
+
+ return &scanPlanXMLToXMLUnmarshal{
+ unmarshal: c.Unmarshal,
+ }
+}
+
+type scanPlanXMLToByteSlice struct{}
+
+func (scanPlanXMLToByteSlice) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanXMLToXMLUnmarshal struct {
+ unmarshal func(data []byte, v any) error
+}
+
+func (s *scanPlanXMLToXMLUnmarshal) Scan(src []byte, dst any) error {
+ if src == nil {
+ dstValue := reflect.ValueOf(dst)
+ if dstValue.Kind() == reflect.Ptr {
+ el := dstValue.Elem()
+ switch el.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Struct:
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
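+	// Zero the destination before unmarshalling so values from a previous scan do not
+	// survive into fields the new document leaves unset.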
+ elem := reflect.ValueOf(dst).Elem()
+ elem.Set(reflect.Zero(elem.Type()))
+
+ return s.unmarshal(src, dst)
+}
+
+func (c *XMLCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+}
+
+func (c *XMLCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go b/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go
new file mode 100644
index 0000000..5d5c681
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go
@@ -0,0 +1,52 @@
+package pgxpool
+
+import (
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+type errBatchResults struct {
+ err error
+}
+
+func (br errBatchResults) Exec() (pgconn.CommandTag, error) {
+ return pgconn.CommandTag{}, br.err
+}
+
+func (br errBatchResults) Query() (pgx.Rows, error) {
+ return errRows{err: br.err}, br.err
+}
+
+func (br errBatchResults) QueryRow() pgx.Row {
+ return errRow{err: br.err}
+}
+
+func (br errBatchResults) Close() error {
+ return br.err
+}
+
+type poolBatchResults struct {
+ br pgx.BatchResults
+ c *Conn
+}
+
+func (br *poolBatchResults) Exec() (pgconn.CommandTag, error) {
+ return br.br.Exec()
+}
+
+func (br *poolBatchResults) Query() (pgx.Rows, error) {
+ return br.br.Query()
+}
+
+func (br *poolBatchResults) QueryRow() pgx.Row {
+ return br.br.QueryRow()
+}
+
+func (br *poolBatchResults) Close() error {
+ err := br.br.Close()
+ if br.c != nil {
+ br.c.Release()
+ br.c = nil
+ }
+ return err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go
new file mode 100644
index 0000000..38c90f3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go
@@ -0,0 +1,134 @@
+package pgxpool
+
+import (
+ "context"
+ "sync/atomic"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/puddle/v2"
+)
+
+// Conn is an acquired *pgx.Conn from a Pool.
+type Conn struct {
+ res *puddle.Resource[*connResource]
+ p *Pool
+}
+
+// Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called.
+// However, it is safe to call Release multiple times. Subsequent calls after the first will be ignored.
+func (c *Conn) Release() {
+ if c.res == nil {
+ return
+ }
+
+ conn := c.Conn()
+ res := c.res
+ c.res = nil
+
+ if c.p.releaseTracer != nil {
+ c.p.releaseTracer.TraceRelease(c.p, TraceReleaseData{Conn: conn})
+ }
+
+ if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
+ res.Destroy()
+		// Signal the health check to run since we just destroyed a connection
+		// and we might be below minConns now
+ c.p.triggerHealthCheck()
+ return
+ }
+
+ // If the pool is consistently being used, we might never get to check the
+ // lifetime of a connection since we only check idle connections in checkConnsHealth
+ // so we also check the lifetime here and force a health check
+ if c.p.isExpired(res) {
+ atomic.AddInt64(&c.p.lifetimeDestroyCount, 1)
+ res.Destroy()
+		// Signal the health check to run since we just destroyed a connection
+		// and we might be below minConns now
+ c.p.triggerHealthCheck()
+ return
+ }
+
+ if c.p.afterRelease == nil {
+ res.Release()
+ return
+ }
+
+ go func() {
+ if c.p.afterRelease(conn) {
+ res.Release()
+ } else {
+ res.Destroy()
+			// Signal the health check to run since we just destroyed a connection
+			// and we might be below minConns now
+ c.p.triggerHealthCheck()
+ }
+ }()
+}
+
+// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack
+// will panic if called on an already released or hijacked connection.
+func (c *Conn) Hijack() *pgx.Conn {
+ if c.res == nil {
+ panic("cannot hijack already released or hijacked connection")
+ }
+
+ conn := c.Conn()
+ res := c.res
+ c.res = nil
+
+ res.Hijack()
+
+ return conn
+}
+
+func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ return c.Conn().Exec(ctx, sql, arguments...)
+}
+
+func (c *Conn) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ return c.Conn().Query(ctx, sql, args...)
+}
+
+func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ return c.Conn().QueryRow(ctx, sql, args...)
+}
+
+func (c *Conn) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ return c.Conn().SendBatch(ctx, b)
+}
+
+func (c *Conn) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// Begin starts a transaction block from the *Conn without explicitly setting a transaction mode (see BeginTx with TxOptions if transaction mode is required).
+func (c *Conn) Begin(ctx context.Context) (pgx.Tx, error) {
+ return c.Conn().Begin(ctx)
+}
+
+// BeginTx starts a transaction block from the *Conn with txOptions determining the transaction mode.
+func (c *Conn) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+ return c.Conn().BeginTx(ctx, txOptions)
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ return c.Conn().Ping(ctx)
+}
+
+func (c *Conn) Conn() *pgx.Conn {
+ return c.connResource().conn
+}
+
+func (c *Conn) connResource() *connResource {
+ return c.res.Value()
+}
+
+func (c *Conn) getPoolRow(r pgx.Row) *poolRow {
+ return c.connResource().getPoolRow(c, r)
+}
+
+func (c *Conn) getPoolRows(r pgx.Rows) *poolRows {
+ return c.connResource().getPoolRows(c, r)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go
new file mode 100644
index 0000000..099443b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go
@@ -0,0 +1,27 @@
+// Package pgxpool is a concurrency-safe connection pool for pgx.
+/*
+pgxpool implements a nearly identical interface to pgx connections.
+
+Creating a Pool
+
+The primary way of creating a pool is with [pgxpool.New]:
+
+ pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
+
+The database connection string can be in URL or keyword/value format. PostgreSQL settings, pgx settings, and pool settings can be
+specified here. In addition, a config struct can be created by [ParseConfig].
+
+ config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
+ if err != nil {
+ // ...
+ }
+ config.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
+ // do something with every new connection
+ }
+
+ pool, err := pgxpool.NewWithConfig(context.Background(), config)
+
+A pool returns without waiting for any connections to be established. Acquire a connection immediately after creating
+the pool to check if a connection can successfully be established.
+*/
+package pgxpool
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
new file mode 100644
index 0000000..e22ed28
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
@@ -0,0 +1,741 @@
+package pgxpool
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/puddle/v2"
+)
+
+var defaultMaxConns = int32(4)
+var defaultMinConns = int32(0)
+var defaultMinIdleConns = int32(0)
+var defaultMaxConnLifetime = time.Hour
+var defaultMaxConnIdleTime = time.Minute * 30
+var defaultHealthCheckPeriod = time.Minute
+
+type connResource struct {
+ conn *pgx.Conn
+ conns []Conn
+ poolRows []poolRow
+ poolRowss []poolRows
+ maxAgeTime time.Time
+}
+
+func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
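+	// cr.conns is a pre-allocated slab of Conn values; handing out the last element and
+	// shrinking the slice avoids a separate allocation per acquired connection.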
+ if len(cr.conns) == 0 {
+ cr.conns = make([]Conn, 128)
+ }
+
+ c := &cr.conns[len(cr.conns)-1]
+ cr.conns = cr.conns[0 : len(cr.conns)-1]
+
+ c.res = res
+ c.p = p
+
+ return c
+}
+
+func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
+ if len(cr.poolRows) == 0 {
+ cr.poolRows = make([]poolRow, 128)
+ }
+
+ pr := &cr.poolRows[len(cr.poolRows)-1]
+ cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
+
+ pr.c = c
+ pr.r = r
+
+ return pr
+}
+
+func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
+ if len(cr.poolRowss) == 0 {
+ cr.poolRowss = make([]poolRows, 128)
+ }
+
+ pr := &cr.poolRowss[len(cr.poolRowss)-1]
+ cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
+
+ pr.c = c
+ pr.r = r
+
+ return pr
+}
+
+// Pool allows for connection reuse.
+type Pool struct {
+ // 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
+ // architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288.
+ newConnsCount int64
+ lifetimeDestroyCount int64
+ idleDestroyCount int64
+
+ p *puddle.Pool[*connResource]
+ config *Config
+ beforeConnect func(context.Context, *pgx.ConnConfig) error
+ afterConnect func(context.Context, *pgx.Conn) error
+ beforeAcquire func(context.Context, *pgx.Conn) bool
+ afterRelease func(*pgx.Conn) bool
+ beforeClose func(*pgx.Conn)
+ minConns int32
+ minIdleConns int32
+ maxConns int32
+ maxConnLifetime time.Duration
+ maxConnLifetimeJitter time.Duration
+ maxConnIdleTime time.Duration
+ healthCheckPeriod time.Duration
+
+ healthCheckChan chan struct{}
+
+ acquireTracer AcquireTracer
+ releaseTracer ReleaseTracer
+
+ closeOnce sync.Once
+ closeChan chan struct{}
+}
+
+// Config is the configuration struct for creating a pool. It must be created by [ParseConfig] and then it can be
+// modified.
+type Config struct {
+ ConnConfig *pgx.ConnConfig
+
+ // BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
+ // will not impact any existing open connections.
+ BeforeConnect func(context.Context, *pgx.ConnConfig) error
+
+ // AfterConnect is called after a connection is established, but before it is added to the pool.
+ AfterConnect func(context.Context, *pgx.Conn) error
+
+ // BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
+ // acquisition or false to indicate that the connection should be destroyed and a different connection should be
+ // acquired.
+ BeforeAcquire func(context.Context, *pgx.Conn) bool
+
+ // AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
+ // return the connection to the pool or false to destroy the connection.
+ AfterRelease func(*pgx.Conn) bool
+
+ // BeforeClose is called right before a connection is closed and removed from the pool.
+ BeforeClose func(*pgx.Conn)
+
+ // MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
+ MaxConnLifetime time.Duration
+
+ // MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
+ // This helps prevent all connections from being closed at the exact same time, starving the pool.
+ MaxConnLifetimeJitter time.Duration
+
+ // MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
+ MaxConnIdleTime time.Duration
+
+ // MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
+ MaxConns int32
+
+ // MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
+ // number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
+ // to create new connections.
+ MinConns int32
+
+ // MinIdleConns is the minimum number of idle connections in the pool. You can increase this to ensure that
+ // there are always idle connections available. This can help reduce tail latencies during request processing,
+ // as you can avoid the latency of establishing a new connection while handling requests. It is superior
+ // to MinConns for this purpose.
+ // Similar to MinConns, the pool might temporarily dip below MinIdleConns after connection closes.
+ MinIdleConns int32
+
+ // HealthCheckPeriod is the duration between checks of the health of idle connections.
+ HealthCheckPeriod time.Duration
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the tls.Config:
+// according to the tls.Config docs it must not be modified after creation.
+func (c *Config) Copy() *Config {
+ newConfig := new(Config)
+ *newConfig = *c
+ newConfig.ConnConfig = c.ConnConfig.Copy()
+ return newConfig
+}
+
+// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
+func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
+
+// New creates a new Pool. See [ParseConfig] for information on connString format.
+func New(ctx context.Context, connString string) (*Pool, error) {
+ config, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithConfig(ctx, config)
+}
+
+// NewWithConfig creates a new Pool. config must have been created by [ParseConfig].
+func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ p := &Pool{
+ config: config,
+ beforeConnect: config.BeforeConnect,
+ afterConnect: config.AfterConnect,
+ beforeAcquire: config.BeforeAcquire,
+ afterRelease: config.AfterRelease,
+ beforeClose: config.BeforeClose,
+ minConns: config.MinConns,
+ minIdleConns: config.MinIdleConns,
+ maxConns: config.MaxConns,
+ maxConnLifetime: config.MaxConnLifetime,
+ maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
+ maxConnIdleTime: config.MaxConnIdleTime,
+ healthCheckPeriod: config.HealthCheckPeriod,
+ healthCheckChan: make(chan struct{}, 1),
+ closeChan: make(chan struct{}),
+ }
+
+ if t, ok := config.ConnConfig.Tracer.(AcquireTracer); ok {
+ p.acquireTracer = t
+ }
+
+ if t, ok := config.ConnConfig.Tracer.(ReleaseTracer); ok {
+ p.releaseTracer = t
+ }
+
+ var err error
+ p.p, err = puddle.NewPool(
+ &puddle.Config[*connResource]{
+ Constructor: func(ctx context.Context) (*connResource, error) {
+ atomic.AddInt64(&p.newConnsCount, 1)
+ connConfig := p.config.ConnConfig.Copy()
+
+ // Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
+ if connConfig.ConnectTimeout <= 0 {
+ connConfig.ConnectTimeout = 2 * time.Minute
+ }
+
+ if p.beforeConnect != nil {
+ if err := p.beforeConnect(ctx, connConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ conn, err := pgx.ConnectConfig(ctx, connConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if p.afterConnect != nil {
+ err = p.afterConnect(ctx, conn)
+ if err != nil {
+ conn.Close(ctx)
+ return nil, err
+ }
+ }
+
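+				// Spread expiry across [MaxConnLifetime, MaxConnLifetime+Jitter) so the whole
+				// pool does not recycle its connections at the same moment.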
+ jitterSecs := rand.Float64() * config.MaxConnLifetimeJitter.Seconds()
+ maxAgeTime := time.Now().Add(config.MaxConnLifetime).Add(time.Duration(jitterSecs) * time.Second)
+
+ cr := &connResource{
+ conn: conn,
+ conns: make([]Conn, 64),
+ poolRows: make([]poolRow, 64),
+ poolRowss: make([]poolRows, 64),
+ maxAgeTime: maxAgeTime,
+ }
+
+ return cr, nil
+ },
+ Destructor: func(value *connResource) {
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ conn := value.conn
+ if p.beforeClose != nil {
+ p.beforeClose(conn)
+ }
+ conn.Close(ctx)
+ select {
+ case <-conn.PgConn().CleanupDone():
+ case <-ctx.Done():
+ }
+ cancel()
+ },
+ MaxSize: config.MaxConns,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ targetIdleResources := max(int(p.minConns), int(p.minIdleConns))
+ p.createIdleResources(ctx, targetIdleResources)
+ p.backgroundHealthCheck()
+ }()
+
+ return p, nil
+}
+
+// ParseConfig builds a Config from connString. It parses connString with the same behavior as [pgx.ParseConfig] with the
+// addition of the following variables:
+//
+// - pool_max_conns: integer greater than 0 (default 4)
+// - pool_min_conns: integer 0 or greater (default 0)
+// - pool_max_conn_lifetime: duration string (default 1 hour)
+// - pool_max_conn_idle_time: duration string (default 30 minutes)
+// - pool_health_check_period: duration string (default 1 minute)
+// - pool_max_conn_lifetime_jitter: duration string (default 0)
+//
+// See Config for definitions of these arguments.
+//
+// # Example Keyword/Value
+// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10 pool_max_conn_lifetime=1h30m
+//
+// # Example URL
+// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10&pool_max_conn_lifetime=1h30m
+func ParseConfig(connString string) (*Config, error) {
+ connConfig, err := pgx.ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ config := &Config{
+ ConnConfig: connConfig,
+ createdByParseConfig: true,
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conns"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conns")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
+ }
+ if n < 1 {
+ return nil, fmt.Errorf("pool_max_conns too small: %d", n)
+ }
+ config.MaxConns = int32(n)
+ } else {
+ config.MaxConns = defaultMaxConns
+ if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
+ config.MaxConns = numCPU
+ }
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_min_conns"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_min_conns")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
+ }
+ config.MinConns = int32(n)
+ } else {
+ config.MinConns = defaultMinConns
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_min_idle_conns"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_min_idle_conns")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse pool_min_idle_conns: %w", err)
+ }
+ config.MinIdleConns = int32(n)
+ } else {
+ config.MinIdleConns = defaultMinIdleConns
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_lifetime: %w", err)
+ }
+ config.MaxConnLifetime = d
+ } else {
+ config.MaxConnLifetime = defaultMaxConnLifetime
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_idle_time"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_idle_time")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_idle_time: %w", err)
+ }
+ config.MaxConnIdleTime = d
+ } else {
+ config.MaxConnIdleTime = defaultMaxConnIdleTime
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_health_check_period"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_health_check_period")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_health_check_period: %w", err)
+ }
+ config.HealthCheckPeriod = d
+ } else {
+ config.HealthCheckPeriod = defaultHealthCheckPeriod
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime_jitter"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime_jitter")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_lifetime_jitter: %w", err)
+ }
+ config.MaxConnLifetimeJitter = d
+ }
+
+ return config, nil
+}
+
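+// A minimal usage sketch of ParseConfig, assuming a placeholder DSN and credentials: the pool
+// options are supplied in the connection string and the resulting Config is passed to
+// NewWithConfig.
+//
+//	config, err := pgxpool.ParseConfig("postgres://jack:secret@pg.example.com:5432/mydb?pool_max_conns=10&pool_min_conns=2")
+//	if err != nil {
+//		// handle parse error
+//	}
+//	pool, err := pgxpool.NewWithConfig(context.Background(), config)
+//	if err != nil {
+//		// handle connect error
+//	}
+//	defer pool.Close()
+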
+// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
+// to the pool and closed.
+func (p *Pool) Close() {
+ p.closeOnce.Do(func() {
+ close(p.closeChan)
+ p.p.Close()
+ })
+}
+
+func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
+ return time.Now().After(res.Value().maxAgeTime)
+}
+
+func (p *Pool) triggerHealthCheck() {
+ go func() {
+ // Destroy is asynchronous so we give it time to actually remove itself from
+ // the pool otherwise we might try to check the pool size too soon
+ time.Sleep(500 * time.Millisecond)
+ select {
+ case p.healthCheckChan <- struct{}{}:
+ default:
+ }
+ }()
+}
+
+func (p *Pool) backgroundHealthCheck() {
+ ticker := time.NewTicker(p.healthCheckPeriod)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-p.closeChan:
+ return
+ case <-p.healthCheckChan:
+ p.checkHealth()
+ case <-ticker.C:
+ p.checkHealth()
+ }
+ }
+}
+
+func (p *Pool) checkHealth() {
+ for {
+ // If checkMinConns failed we don't destroy any connections since we couldn't
+ // even get to minConns
+ if err := p.checkMinConns(); err != nil {
+ // Should we log this error somewhere?
+ break
+ }
+ if !p.checkConnsHealth() {
+ // Since we didn't destroy any connections we can stop looping
+ break
+ }
+ // Technically Destroy is asynchronous but 500ms should be enough for it to
+ // remove it from the underlying pool
+ select {
+ case <-p.closeChan:
+ return
+ case <-time.After(500 * time.Millisecond):
+ }
+ }
+}
+
+// checkConnsHealth checks all idle connections, destroys any that have been idle too long or have exceeded their
+// maximum lifetime, and returns true if any were destroyed.
+func (p *Pool) checkConnsHealth() bool {
+ var destroyed bool
+ totalConns := p.Stat().TotalConns()
+ resources := p.p.AcquireAllIdle()
+ for _, res := range resources {
+ // We're okay going under minConns if the lifetime is up
+ if p.isExpired(res) && totalConns >= p.minConns {
+ atomic.AddInt64(&p.lifetimeDestroyCount, 1)
+ res.Destroy()
+ destroyed = true
+ // Since Destroy is async we manually decrement totalConns.
+ totalConns--
+ } else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
+ atomic.AddInt64(&p.idleDestroyCount, 1)
+ res.Destroy()
+ destroyed = true
+ // Since Destroy is async we manually decrement totalConns.
+ totalConns--
+ } else {
+ res.ReleaseUnused()
+ }
+ }
+ return destroyed
+}
+
+func (p *Pool) checkMinConns() error {
+ // TotalConns can include ones that are being destroyed but we should have
+ // sleep(500ms) around all of the destroys to help prevent that from throwing
+ // off this check
+
+ // Create the number of connections needed to get to both minConns and minIdleConns
+ toCreate := max(p.minConns-p.Stat().TotalConns(), p.minIdleConns-p.Stat().IdleConns())
+ if toCreate > 0 {
+ return p.createIdleResources(context.Background(), int(toCreate))
+ }
+ return nil
+}
+
+func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+
+ errs := make(chan error, targetResources)
+
+ for i := 0; i < targetResources; i++ {
+ go func() {
+ err := p.p.CreateResource(ctx)
+ // Ignore ErrNotAvailable since it means that the pool has become full since we started creating resource.
+ if err == puddle.ErrNotAvailable {
+ err = nil
+ }
+ errs <- err
+ }()
+ }
+
+ var firstError error
+ for i := 0; i < targetResources; i++ {
+ err := <-errs
+ if err != nil && firstError == nil {
+ cancel()
+ firstError = err
+ }
+ }
+
+ return firstError
+}
+
+// Acquire returns a connection (*Conn) from the Pool
+func (p *Pool) Acquire(ctx context.Context) (c *Conn, err error) {
+ if p.acquireTracer != nil {
+ ctx = p.acquireTracer.TraceAcquireStart(ctx, p, TraceAcquireStartData{})
+ defer func() {
+ var conn *pgx.Conn
+ if c != nil {
+ conn = c.Conn()
+ }
+ p.acquireTracer.TraceAcquireEnd(ctx, p, TraceAcquireEndData{Conn: conn, Err: err})
+ }()
+ }
+
+ for {
+ res, err := p.p.Acquire(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ cr := res.Value()
+
+ if res.IdleDuration() > time.Second {
+ err := cr.conn.Ping(ctx)
+ if err != nil {
+ res.Destroy()
+ continue
+ }
+ }
+
+ if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
+ return cr.getConn(p, res), nil
+ }
+
+ res.Destroy()
+ }
+}
+
+// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
+// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
+// automatically released after the call of f.
+func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
+ conn, err := p.Acquire(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.Release()
+
+ return f(conn)
+}
+
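+// A minimal sketch of AcquireFunc, assuming a *Pool named pool and an illustrative session
+// setting; the connection is released automatically when the callback returns.
+//
+//	err := pool.AcquireFunc(ctx, func(c *pgxpool.Conn) error {
+//		_, err := c.Exec(ctx, "set application_name = 'worker-1'")
+//		return err
+//	})
+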
+// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
+// keep-alive functionality. It does not update pool statistics.
+func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
+ resources := p.p.AcquireAllIdle()
+ conns := make([]*Conn, 0, len(resources))
+ for _, res := range resources {
+ cr := res.Value()
+ if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
+ conns = append(conns, cr.getConn(p, res))
+ } else {
+ res.Destroy()
+ }
+ }
+
+ return conns
+}
+
+// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
+// disrupt all connections (such as a network interruption or a server state change).
+//
+// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
+// to the pool.
+func (p *Pool) Reset() {
+ p.p.Reset()
+}
+
+// Config returns a copy of config that was used to initialize this pool.
+func (p *Pool) Config() *Config { return p.config.Copy() }
+
+// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
+func (p *Pool) Stat() *Stat {
+ return &Stat{
+ s: p.p.Stat(),
+ newConnsCount: atomic.LoadInt64(&p.newConnsCount),
+ lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
+ idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount),
+ }
+}
+
+// Exec acquires a connection from the Pool and executes the given SQL.
+// SQL can be either a prepared statement name or an SQL string.
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+// The acquired connection is returned to the pool when the Exec function returns.
+func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ defer c.Release()
+
+ return c.Exec(ctx, sql, arguments...)
+}
+
+// Query acquires a connection and executes a query that returns pgx.Rows.
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
+//
+// If there is an error, the returned pgx.Rows will be returned in an error state.
+// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
+//
+// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
+func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errRows{err: err}, err
+ }
+
+ rows, err := c.Query(ctx, sql, args...)
+ if err != nil {
+ c.Release()
+ return errRows{err: err}, err
+ }
+
+ return c.getPoolRows(rows), nil
+}
+
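+// A minimal sketch of the Query pattern described above, assuming a *Pool named pool and a
+// placeholder widgets table; closing the rows (or exhausting them with Next) returns the
+// connection to the pool.
+//
+//	rows, err := pool.Query(ctx, "select id, name from widgets")
+//	if err != nil {
+//		return err
+//	}
+//	defer rows.Close()
+//	for rows.Next() {
+//		var id int64
+//		var name string
+//		if err := rows.Scan(&id, &name); err != nil {
+//			return err
+//		}
+//	}
+//	if err := rows.Err(); err != nil {
+//		return err
+//	}
+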
+// QueryRow acquires a connection and executes a query that is expected
+// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
+// Scan method is called. If the query selects no rows, pgx.Row's Scan will
+// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
+// and discards the rest. The acquired connection is returned to the Pool when
+// pgx.Row's Scan method is called.
+//
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+//
+// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
+func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errRow{err: err}
+ }
+
+ row := c.QueryRow(ctx, sql, args...)
+ return c.getPoolRow(row)
+}
+
+func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errBatchResults{err: err}
+ }
+
+ br := c.SendBatch(ctx, b)
+ return &poolBatchResults{br: br, c: c}
+}
+
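+// A minimal sketch of SendBatch, assuming a *Pool named pool and placeholder statements; the
+// batch results must be closed to release the underlying connection, and one Exec is read per
+// queued statement.
+//
+//	b := &pgx.Batch{}
+//	b.Queue("insert into widgets(name) values($1)", "gear")
+//	b.Queue("insert into widgets(name) values($1)", "sprocket")
+//	br := pool.SendBatch(ctx, b)
+//	defer br.Close()
+//	for i := 0; i < b.Len(); i++ {
+//		if _, err := br.Exec(); err != nil {
+//			return err
+//		}
+//	}
+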
+// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
+// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if transaction mode is required).
+// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
+// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
+func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
+ return p.BeginTx(ctx, pgx.TxOptions{})
+}
+
+// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
+// Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation.
+// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
+// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
+func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ t, err := c.BeginTx(ctx, txOptions)
+ if err != nil {
+ c.Release()
+ return nil, err
+ }
+
+ return &Tx{t: t, c: c}, nil
+}
+
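+// A minimal transaction sketch, assuming a *Pool named pool and a placeholder widgets table;
+// per the Commit and Rollback documentation, the deferred Rollback is safe to call even after a
+// successful Commit.
+//
+//	tx, err := pool.Begin(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback(ctx)
+//	if _, err := tx.Exec(ctx, "update widgets set name = $1 where id = $2", "cog", 1); err != nil {
+//		return err
+//	}
+//	return tx.Commit(ctx)
+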
+func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer c.Release()
+
+ return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// Ping acquires a connection from the Pool and executes an empty sql statement against it.
+// If the sql returns without error, the database Ping is considered successful; otherwise, the error is returned.
+func (p *Pool) Ping(ctx context.Context) error {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.Release()
+ return c.Ping(ctx)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go b/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
new file mode 100644
index 0000000..f834b7e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
@@ -0,0 +1,116 @@
+package pgxpool
+
+import (
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+type errRows struct {
+ err error
+}
+
+func (errRows) Close() {}
+func (e errRows) Err() error { return e.err }
+func (errRows) CommandTag() pgconn.CommandTag { return pgconn.CommandTag{} }
+func (errRows) FieldDescriptions() []pgconn.FieldDescription { return nil }
+func (errRows) Next() bool { return false }
+func (e errRows) Scan(dest ...any) error { return e.err }
+func (e errRows) Values() ([]any, error) { return nil, e.err }
+func (e errRows) RawValues() [][]byte { return nil }
+func (e errRows) Conn() *pgx.Conn { return nil }
+
+type errRow struct {
+ err error
+}
+
+func (e errRow) Scan(dest ...any) error { return e.err }
+
+type poolRows struct {
+ r pgx.Rows
+ c *Conn
+ err error
+}
+
+func (rows *poolRows) Close() {
+ rows.r.Close()
+ if rows.c != nil {
+ rows.c.Release()
+ rows.c = nil
+ }
+}
+
+func (rows *poolRows) Err() error {
+ if rows.err != nil {
+ return rows.err
+ }
+ return rows.r.Err()
+}
+
+func (rows *poolRows) CommandTag() pgconn.CommandTag {
+ return rows.r.CommandTag()
+}
+
+func (rows *poolRows) FieldDescriptions() []pgconn.FieldDescription {
+ return rows.r.FieldDescriptions()
+}
+
+func (rows *poolRows) Next() bool {
+ if rows.err != nil {
+ return false
+ }
+
+ n := rows.r.Next()
+ if !n {
+ rows.Close()
+ }
+ return n
+}
+
+func (rows *poolRows) Scan(dest ...any) error {
+ err := rows.r.Scan(dest...)
+ if err != nil {
+ rows.Close()
+ }
+ return err
+}
+
+func (rows *poolRows) Values() ([]any, error) {
+ values, err := rows.r.Values()
+ if err != nil {
+ rows.Close()
+ }
+ return values, err
+}
+
+func (rows *poolRows) RawValues() [][]byte {
+ return rows.r.RawValues()
+}
+
+func (rows *poolRows) Conn() *pgx.Conn {
+ return rows.r.Conn()
+}
+
+type poolRow struct {
+ r pgx.Row
+ c *Conn
+ err error
+}
+
+func (row *poolRow) Scan(dest ...any) error {
+ if row.err != nil {
+ return row.err
+ }
+
+ panicked := true
+ defer func() {
+ if panicked && row.c != nil {
+ row.c.Release()
+ }
+ }()
+ err := row.r.Scan(dest...)
+ panicked = false
+ if row.c != nil {
+ row.c.Release()
+ }
+ return err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go b/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
new file mode 100644
index 0000000..e02b6ac
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
@@ -0,0 +1,91 @@
+package pgxpool
+
+import (
+ "time"
+
+ "github.com/jackc/puddle/v2"
+)
+
+// Stat is a snapshot of Pool statistics.
+type Stat struct {
+ s *puddle.Stat
+ newConnsCount int64
+ lifetimeDestroyCount int64
+ idleDestroyCount int64
+}
+
+// AcquireCount returns the cumulative count of successful acquires from the pool.
+func (s *Stat) AcquireCount() int64 {
+ return s.s.AcquireCount()
+}
+
+// AcquireDuration returns the total duration of all successful acquires from
+// the pool.
+func (s *Stat) AcquireDuration() time.Duration {
+ return s.s.AcquireDuration()
+}
+
+// AcquiredConns returns the number of currently acquired connections in the pool.
+func (s *Stat) AcquiredConns() int32 {
+ return s.s.AcquiredResources()
+}
+
+// CanceledAcquireCount returns the cumulative count of acquires from the pool
+// that were canceled by a context.
+func (s *Stat) CanceledAcquireCount() int64 {
+ return s.s.CanceledAcquireCount()
+}
+
+// ConstructingConns returns the number of conns with construction in progress in
+// the pool.
+func (s *Stat) ConstructingConns() int32 {
+ return s.s.ConstructingResources()
+}
+
+// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
+// that waited for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireCount() int64 {
+ return s.s.EmptyAcquireCount()
+}
+
+// IdleConns returns the number of currently idle conns in the pool.
+func (s *Stat) IdleConns() int32 {
+ return s.s.IdleResources()
+}
+
+// MaxConns returns the maximum size of the pool.
+func (s *Stat) MaxConns() int32 {
+ return s.s.MaxResources()
+}
+
+// TotalConns returns the total number of resources currently in the pool.
+// The value is the sum of ConstructingConns, AcquiredConns, and
+// IdleConns.
+func (s *Stat) TotalConns() int32 {
+ return s.s.TotalResources()
+}
+
+// NewConnsCount returns the cumulative count of new connections opened.
+func (s *Stat) NewConnsCount() int64 {
+ return s.newConnsCount
+}
+
+// MaxLifetimeDestroyCount returns the cumulative count of connections destroyed
+// because they exceeded MaxConnLifetime.
+func (s *Stat) MaxLifetimeDestroyCount() int64 {
+ return s.lifetimeDestroyCount
+}
+
+// MaxIdleDestroyCount returns the cumulative count of connections destroyed because
+// they exceeded MaxConnIdleTime.
+func (s *Stat) MaxIdleDestroyCount() int64 {
+ return s.idleDestroyCount
+}
+
+// EmptyAcquireWaitTime returns the cumulative time waited for successful acquires
+// from the pool for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireWaitTime() time.Duration {
+ return s.s.EmptyAcquireWaitTime()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go
new file mode 100644
index 0000000..78b9d15
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go
@@ -0,0 +1,33 @@
+package pgxpool
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// AcquireTracer traces Acquire.
+type AcquireTracer interface {
+ // TraceAcquireStart is called at the beginning of Acquire.
+ // The returned context is used for the rest of the call and will be passed to the TraceAcquireEnd.
+ TraceAcquireStart(ctx context.Context, pool *Pool, data TraceAcquireStartData) context.Context
+ // TraceAcquireEnd is called when a connection has been acquired.
+ TraceAcquireEnd(ctx context.Context, pool *Pool, data TraceAcquireEndData)
+}
+
+type TraceAcquireStartData struct{}
+
+type TraceAcquireEndData struct {
+ Conn *pgx.Conn
+ Err error
+}
+
+// ReleaseTracer traces Release.
+type ReleaseTracer interface {
+ // TraceRelease is called at the beginning of Release.
+ TraceRelease(pool *Pool, data TraceReleaseData)
+}
+
+type TraceReleaseData struct {
+ Conn *pgx.Conn
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
new file mode 100644
index 0000000..b49e7f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
@@ -0,0 +1,83 @@
+package pgxpool
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// Tx represents a database transaction acquired from a Pool.
+type Tx struct {
+ t pgx.Tx
+ c *Conn
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
+ return tx.t.Begin(ctx)
+}
+
+// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return an error
+// where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call multiple times. If
+// the commit fails with a rollback status (e.g. the transaction was already in a broken state) then ErrTxCommitRollback
+// will be returned.
+func (tx *Tx) Commit(ctx context.Context) error {
+ err := tx.t.Commit(ctx)
+ if tx.c != nil {
+ tx.c.Release()
+ tx.c = nil
+ }
+ return err
+}
+
+// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return
+// an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call
+// multiple times. Hence, defer tx.Rollback() is safe even if tx.Commit() will be called first in a non-error condition.
+func (tx *Tx) Rollback(ctx context.Context) error {
+ err := tx.t.Rollback(ctx)
+ if tx.c != nil {
+ tx.c.Release()
+ tx.c = nil
+ }
+ return err
+}
+
+func (tx *Tx) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ return tx.t.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+func (tx *Tx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ return tx.t.SendBatch(ctx, b)
+}
+
+func (tx *Tx) LargeObjects() pgx.LargeObjects {
+ return tx.t.LargeObjects()
+}
+
+// Prepare creates a prepared statement with name and sql. If the name is empty,
+// an anonymous prepared statement will be used. sql can contain placeholders
+// for bound parameters. These placeholders are referenced positionally as $1, $2, etc.
+//
+// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
+// name and sql arguments. This allows a code path to Prepare and Query/Exec without
+// needing to first check whether the statement has already been prepared.
+func (tx *Tx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ return tx.t.Prepare(ctx, name, sql)
+}
+
+func (tx *Tx) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ return tx.t.Exec(ctx, sql, arguments...)
+}
+
+func (tx *Tx) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ return tx.t.Query(ctx, sql, args...)
+}
+
+func (tx *Tx) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ return tx.t.QueryRow(ctx, sql, args...)
+}
+
+func (tx *Tx) Conn() *pgx.Conn {
+ return tx.t.Conn()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/rows.go b/vendor/github.com/jackc/pgx/v5/rows.go
new file mode 100644
index 0000000..3e64a3a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/rows.go
@@ -0,0 +1,874 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// Rows is the result set returned from *Conn.Query. Rows must be closed before
+// the *Conn can be used again. Rows are closed by explicitly calling Close(),
+// calling Next() until it returns false, or when a fatal error occurs.
+//
+// Once a Rows is closed the only methods that may be called are Close(), Err(),
+// and CommandTag().
+//
+// Rows is an interface instead of a struct to allow tests to mock Query. However,
+// adding a method to an interface is technically a breaking change. Because of this
+// the Rows interface is partially excluded from semantic version requirements.
+// Methods will not be removed or changed, but new methods may be added.
+type Rows interface {
+ // Close closes the rows, making the connection ready for use again. It is safe
+ // to call Close after rows is already closed.
+ Close()
+
+ // Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
+ // calling Close or by Next returning false). If it is called early it may return nil even if there was an error
+ // executing the query.
+ Err() error
+
+ // CommandTag returns the command tag from this query. It is only available after Rows is closed.
+ CommandTag() pgconn.CommandTag
+
+ // FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
+ // when there was an error executing the query.
+ FieldDescriptions() []pgconn.FieldDescription
+
+ // Next prepares the next row for reading. It returns true if there is another
+ // row and false if no more rows are available or a fatal error has occurred.
+ // It automatically closes rows when all rows are read.
+ //
+ // Callers should check rows.Err() after rows.Next() returns false to detect
+ // whether result-set reading ended prematurely due to an error. See
+ // Conn.Query for details.
+ //
+ // For simpler error handling, consider using the higher-level pgx v5
+ // CollectRows() and ForEachRow() helpers instead.
+ Next() bool
+
+ // Scan reads the values from the current row into dest values positionally.
+ // dest can include pointers to core types, values implementing the Scanner
+ // interface, and nil. nil will skip the value entirely. It is an error to
+ // call Scan without first calling Next() and checking that it returned true.
+ Scan(dest ...any) error
+
+ // Values returns the decoded row values. As with Scan(), it is an error to
+ // call Values without first calling Next() and checking that it returned
+ // true.
+ Values() ([]any, error)
+
+ // RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
+ // call or the Rows is closed.
+ RawValues() [][]byte
+
+ // Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
+ // *Conn (e.g. if it was created by RowsFromResultReader)
+ Conn() *Conn
+}
+
+// Row is a convenience wrapper over Rows that is returned by QueryRow.
+//
+// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
+// adding a method to an interface is technically a breaking change. Because of this
+// the Row interface is partially excluded from semantic version requirements.
+// Methods will not be removed or changed, but new methods may be added.
+type Row interface {
+	// Scan works the same as Rows.Scan, with the following exceptions: if no
+	// rows were found it returns ErrNoRows; if multiple rows are returned it
+	// ignores all but the first.
+ Scan(dest ...any) error
+}
+
+// RowScanner scans an entire row at a time into the RowScanner.
+type RowScanner interface {
+	// ScanRow scans the row.
+ ScanRow(rows Rows) error
+}
+
+// connRow implements the Row interface for Conn.QueryRow.
+type connRow baseRows
+
+func (r *connRow) Scan(dest ...any) (err error) {
+ rows := (*baseRows)(r)
+
+ if rows.Err() != nil {
+ return rows.Err()
+ }
+
+ for _, d := range dest {
+ if _, ok := d.(*pgtype.DriverBytes); ok {
+ rows.Close()
+ return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
+ }
+ }
+
+ if !rows.Next() {
+ if rows.Err() == nil {
+ return ErrNoRows
+ }
+ return rows.Err()
+ }
+
+ rows.Scan(dest...)
+ rows.Close()
+ return rows.Err()
+}
+
+// baseRows implements the Rows interface for Conn.Query.
+type baseRows struct {
+ typeMap *pgtype.Map
+ resultReader *pgconn.ResultReader
+
+ values [][]byte
+
+ commandTag pgconn.CommandTag
+ err error
+ closed bool
+
+ scanPlans []pgtype.ScanPlan
+ scanTypes []reflect.Type
+
+ conn *Conn
+ multiResultReader *pgconn.MultiResultReader
+
+ queryTracer QueryTracer
+ batchTracer BatchTracer
+ ctx context.Context
+ startTime time.Time
+ sql string
+ args []any
+ rowCount int
+}
+
+func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
+ return rows.resultReader.FieldDescriptions()
+}
+
+func (rows *baseRows) Close() {
+ if rows.closed {
+ return
+ }
+
+ rows.closed = true
+
+ if rows.resultReader != nil {
+ var closeErr error
+ rows.commandTag, closeErr = rows.resultReader.Close()
+ if rows.err == nil {
+ rows.err = closeErr
+ }
+ }
+
+ if rows.multiResultReader != nil {
+ closeErr := rows.multiResultReader.Close()
+ if rows.err == nil {
+ rows.err = closeErr
+ }
+ }
+
+ if rows.err != nil && rows.conn != nil && rows.sql != "" {
+ if sc := rows.conn.statementCache; sc != nil {
+ sc.Invalidate(rows.sql)
+ }
+
+ if sc := rows.conn.descriptionCache; sc != nil {
+ sc.Invalidate(rows.sql)
+ }
+ }
+
+ if rows.batchTracer != nil {
+ rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
+ } else if rows.queryTracer != nil {
+ rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
+ }
+
+	// Zero references to other memory allocations. This allows them to be GC'd even while the Rows is still referenced. In
+ // particular, when using pgxpool GC could be delayed as pgxpool.poolRows are allocated in large slices.
+ //
+ // https://github.com/jackc/pgx/pull/2269
+ rows.values = nil
+ rows.scanPlans = nil
+ rows.scanTypes = nil
+ rows.ctx = nil
+ rows.sql = ""
+ rows.args = nil
+}
+
+func (rows *baseRows) CommandTag() pgconn.CommandTag {
+ return rows.commandTag
+}
+
+func (rows *baseRows) Err() error {
+ return rows.err
+}
+
+// fatal signals an error occurred after the query was sent to the server. It
+// closes the rows automatically.
+func (rows *baseRows) fatal(err error) {
+ if rows.err != nil {
+ return
+ }
+
+ rows.err = err
+ rows.Close()
+}
+
+func (rows *baseRows) Next() bool {
+ if rows.closed {
+ return false
+ }
+
+ if rows.resultReader.NextRow() {
+ rows.rowCount++
+ rows.values = rows.resultReader.Values()
+ return true
+ } else {
+ rows.Close()
+ return false
+ }
+}
+
+func (rows *baseRows) Scan(dest ...any) error {
+ m := rows.typeMap
+ fieldDescriptions := rows.FieldDescriptions()
+ values := rows.values
+
+ if len(fieldDescriptions) != len(values) {
+ err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
+ rows.fatal(err)
+ return err
+ }
+
+ if len(dest) == 1 {
+ if rc, ok := dest[0].(RowScanner); ok {
+ err := rc.ScanRow(rows)
+ if err != nil {
+ rows.fatal(err)
+ }
+ return err
+ }
+ }
+
+ if len(fieldDescriptions) != len(dest) {
+ err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
+ rows.fatal(err)
+ return err
+ }
+
+ if rows.scanPlans == nil {
+ rows.scanPlans = make([]pgtype.ScanPlan, len(values))
+ rows.scanTypes = make([]reflect.Type, len(values))
+ for i := range dest {
+ rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
+ rows.scanTypes[i] = reflect.TypeOf(dest[i])
+ }
+ }
+
+ for i, dst := range dest {
+ if dst == nil {
+ continue
+ }
+
+ if rows.scanTypes[i] != reflect.TypeOf(dst) {
+ rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
+ rows.scanTypes[i] = reflect.TypeOf(dest[i])
+ }
+
+ err := rows.scanPlans[i].Scan(values[i], dst)
+ if err != nil {
+ err = ScanArgError{ColumnIndex: i, FieldName: fieldDescriptions[i].Name, Err: err}
+ rows.fatal(err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (rows *baseRows) Values() ([]any, error) {
+ if rows.closed {
+ return nil, errors.New("rows is closed")
+ }
+
+ values := make([]any, 0, len(rows.FieldDescriptions()))
+
+ for i := range rows.FieldDescriptions() {
+ buf := rows.values[i]
+ fd := &rows.FieldDescriptions()[i]
+
+ if buf == nil {
+ values = append(values, nil)
+ continue
+ }
+
+ if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
+ value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, value)
+ } else {
+ switch fd.Format {
+ case TextFormatCode:
+ values = append(values, string(buf))
+ case BinaryFormatCode:
+ newBuf := make([]byte, len(buf))
+ copy(newBuf, buf)
+ values = append(values, newBuf)
+ default:
+ rows.fatal(errors.New("unknown format code"))
+ }
+ }
+
+ if rows.Err() != nil {
+ return nil, rows.Err()
+ }
+ }
+
+ return values, rows.Err()
+}
+
+func (rows *baseRows) RawValues() [][]byte {
+ return rows.values
+}
+
+func (rows *baseRows) Conn() *Conn {
+ return rows.conn
+}
+
+type ScanArgError struct {
+ ColumnIndex int
+ FieldName string
+ Err error
+}
+
+func (e ScanArgError) Error() string {
+ if e.FieldName == "?column?" { // Don't include the fieldname if it's unknown
+ return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
+ }
+
+ return fmt.Sprintf("can't scan into dest[%d] (col: %s): %v", e.ColumnIndex, e.FieldName, e.Err)
+}
+
+func (e ScanArgError) Unwrap() error {
+ return e.Err
+}
+
+// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
+//
+// typeMap - OID to Go type mapping.
+// fieldDescriptions - OID and format of values
+// values - the raw data as returned from the PostgreSQL server
+// dest - the destination that values will be decoded into
+func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
+ if len(fieldDescriptions) != len(values) {
+ return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
+ }
+ if len(fieldDescriptions) != len(dest) {
+ return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
+ }
+
+ for i, d := range dest {
+ if d == nil {
+ continue
+ }
+
+ err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
+ if err != nil {
+ return ScanArgError{ColumnIndex: i, FieldName: fieldDescriptions[i].Name, Err: err}
+ }
+ }
+
+ return nil
+}
+
+// RowsFromResultReader returns a Rows that will read from values resultReader and decode with typeMap. It can be used
+// to read from the lower level pgconn interface.
+func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
+ return &baseRows{
+ typeMap: typeMap,
+ resultReader: resultReader,
+ }
+}
+
+// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
+// fails to scan or fn returns an error the query will be aborted and the error will be returned. Rows will be closed
+// when ForEachRow returns.
+func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
+ defer rows.Close()
+
+ for rows.Next() {
+ err := rows.Scan(scans...)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ err = fn()
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ }
+
+ if err := rows.Err(); err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ return rows.CommandTag(), nil
+}
+
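+// A minimal sketch of ForEachRow, assuming a *Conn named conn and a placeholder widgets table;
+// the scan targets are reused for every row.
+//
+//	var id int64
+//	var name string
+//	rows, _ := conn.Query(ctx, "select id, name from widgets")
+//	_, err := pgx.ForEachRow(rows, []any{&id, &name}, func() error {
+//		fmt.Printf("%d: %s\n", id, name)
+//		return nil
+//	})
+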
+// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
+type CollectableRow interface {
+ FieldDescriptions() []pgconn.FieldDescription
+ Scan(dest ...any) error
+ Values() ([]any, error)
+ RawValues() [][]byte
+}
+
+// RowToFunc is a function that scans or otherwise converts row to a T.
+type RowToFunc[T any] func(row CollectableRow) (T, error)
+
+// AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T.
+//
+// This function closes the rows automatically on return.
+func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) {
+ defer rows.Close()
+
+ for rows.Next() {
+ value, err := fn(rows)
+ if err != nil {
+ return nil, err
+ }
+ slice = append(slice, value)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return slice, nil
+}
+
+// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
+//
+// This function closes the rows automatically on return.
+func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
+ return AppendRows([]T{}, rows, fn)
+}
+
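+// A minimal sketch of CollectRows with the RowTo helper defined below, assuming a *Conn named
+// conn and a placeholder widgets table.
+//
+//	rows, _ := conn.Query(ctx, "select name from widgets")
+//	names, err := pgx.CollectRows(rows, pgx.RowTo[string])
+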
+// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found, it returns an error where errors.Is(ErrNoRows) is true.
+// CollectOneRow is to CollectRows as QueryRow is to Query.
+//
+// This function closes the rows automatically on return.
+func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
+ defer rows.Close()
+
+ var value T
+ var err error
+
+ if !rows.Next() {
+ if err = rows.Err(); err != nil {
+ return value, err
+ }
+ return value, ErrNoRows
+ }
+
+ value, err = fn(rows)
+ if err != nil {
+ return value, err
+ }
+
+ // The defer rows.Close() won't have executed yet. If the query returned more than one row, rows would still be open.
+ // rows.Close() must be called before rows.Err() so we explicitly call it here.
+ rows.Close()
+ return value, rows.Err()
+}
+
+// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
+// - If no rows are found, it returns an error where errors.Is(ErrNoRows) is true.
+// - If more than one row is found, it returns an error where errors.Is(ErrTooManyRows) is true.
+//
+// This function closes the rows automatically on return.
+func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
+ defer rows.Close()
+
+ var (
+ err error
+ value T
+ )
+
+ if !rows.Next() {
+ if err = rows.Err(); err != nil {
+ return value, err
+ }
+
+ return value, ErrNoRows
+ }
+
+ value, err = fn(rows)
+ if err != nil {
+ return value, err
+ }
+
+ if rows.Next() {
+ var zero T
+
+ return zero, ErrTooManyRows
+ }
+
+ return value, rows.Err()
+}
+
+// RowTo returns a T scanned from row.
+func RowTo[T any](row CollectableRow) (T, error) {
+ var value T
+ err := row.Scan(&value)
+ return value, err
+}
+
+// RowToAddrOf returns the address of a T scanned from row.
+func RowToAddrOf[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := row.Scan(&value)
+ return &value, err
+}
+
+// RowToMap returns a map scanned from row.
+func RowToMap(row CollectableRow) (map[string]any, error) {
+ var value map[string]any
+ err := row.Scan((*mapRowScanner)(&value))
+ return value, err
+}
+
+type mapRowScanner map[string]any
+
+func (rs *mapRowScanner) ScanRow(rows Rows) error {
+ values, err := rows.Values()
+ if err != nil {
+ return err
+ }
+
+ *rs = make(mapRowScanner, len(values))
+
+ for i := range values {
+ (*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
+ }
+
+ return nil
+}
+
+// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number of public fields as row
+// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
+// ignored.
+func RowToStructByPos[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return value, err
+}
+
+// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number of
+// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
+// the field will be ignored.
+func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return &value, err
+}
+
+type positionalStructRowScanner struct {
+ ptrToStruct any
+}
+
+func (rs *positionalStructRowScanner) ScanRow(rows CollectableRow) error {
+ typ := reflect.TypeOf(rs.ptrToStruct).Elem()
+ fields := lookupStructFields(typ)
+ if len(rows.RawValues()) > len(fields) {
+ return fmt.Errorf(
+ "got %d values, but dst struct has only %d fields",
+ len(rows.RawValues()),
+ len(fields),
+ )
+ }
+ scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
+ return rows.Scan(scanTargets...)
+}
+
+// Map from reflect.Type -> []structRowField
+var positionalStructFieldMap sync.Map
+
+func lookupStructFields(t reflect.Type) []structRowField {
+ if cached, ok := positionalStructFieldMap.Load(t); ok {
+ return cached.([]structRowField)
+ }
+
+ fieldStack := make([]int, 0, 1)
+ fields := computeStructFields(t, make([]structRowField, 0, t.NumField()), &fieldStack)
+ fieldsIface, _ := positionalStructFieldMap.LoadOrStore(t, fields)
+ return fieldsIface.([]structRowField)
+}
+
+func computeStructFields(
+ t reflect.Type,
+ fields []structRowField,
+ fieldStack *[]int,
+) []structRowField {
+ tail := len(*fieldStack)
+ *fieldStack = append(*fieldStack, 0)
+ for i := 0; i < t.NumField(); i++ {
+ sf := t.Field(i)
+ (*fieldStack)[tail] = i
+ // Handle anonymous struct embedding, but do not try to handle embedded pointers.
+ if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
+ fields = computeStructFields(sf.Type, fields, fieldStack)
+ } else if sf.PkgPath == "" {
+ dbTag, _ := sf.Tag.Lookup(structTagKey)
+ if dbTag == "-" {
+ // Field is ignored, skip it.
+ continue
+ }
+ fields = append(fields, structRowField{
+ path: append([]int(nil), *fieldStack...),
+ })
+ }
+ }
+ *fieldStack = (*fieldStack)[:tail]
+ return fields
+}
+
+// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
+// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
+// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
+func RowToStructByName[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return value, err
+}
+
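+// A minimal sketch of name-based struct scanning, assuming a *Conn named conn and a placeholder
+// widgets table; the db tags map columns to fields.
+//
+//	type Widget struct {
+//		ID   int64  `db:"id"`
+//		Name string `db:"name"`
+//	}
+//
+//	rows, _ := conn.Query(ctx, "select id, name from widgets")
+//	widgets, err := pgx.CollectRows(rows, pgx.RowToStructByName[Widget])
+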
+// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
+// of named public fields as row has fields. The row and T fields will be matched by name. The match is
+// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
+// then the field will be ignored.
+func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return &value, err
+}
+
+// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have at least as many named public
+// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
+// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
+func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
+ return value, err
+}
+
+// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have at least as
+// many named public fields as row has fields. The row and T fields will be matched by name. The match is
+// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
+// then the field will be ignored.
+func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
+ return &value, err
+}
+
+type namedStructRowScanner struct {
+ ptrToStruct any
+ lax bool
+}
+
+func (rs *namedStructRowScanner) ScanRow(rows CollectableRow) error {
+ typ := reflect.TypeOf(rs.ptrToStruct).Elem()
+ fldDescs := rows.FieldDescriptions()
+ namedStructFields, err := lookupNamedStructFields(typ, fldDescs)
+ if err != nil {
+ return err
+ }
+ if !rs.lax && namedStructFields.missingField != "" {
+ return fmt.Errorf("cannot find field %s in returned row", namedStructFields.missingField)
+ }
+ fields := namedStructFields.fields
+ scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
+ return rows.Scan(scanTargets...)
+}
+
+// Map from namedStructFieldsKey -> *namedStructFields
+var namedStructFieldMap sync.Map
+
+type namedStructFieldsKey struct {
+ t reflect.Type
+ colNames string
+}
+
+type namedStructFields struct {
+ fields []structRowField
+ // missingField is the first field from the struct without a corresponding row field.
+ // This is used to construct the correct error message for non-lax queries.
+ missingField string
+}
+
+func lookupNamedStructFields(
+ t reflect.Type,
+ fldDescs []pgconn.FieldDescription,
+) (*namedStructFields, error) {
+ key := namedStructFieldsKey{
+ t: t,
+ colNames: joinFieldNames(fldDescs),
+ }
+ if cached, ok := namedStructFieldMap.Load(key); ok {
+ return cached.(*namedStructFields), nil
+ }
+
+	// We could probably do two levels of caching, where we compute the key -> fields mapping
+ // for a type only once, cache it by type, then use that to compute the column -> fields
+ // mapping for a given set of columns.
+ fieldStack := make([]int, 0, 1)
+ fields, missingField := computeNamedStructFields(
+ fldDescs,
+ t,
+ make([]structRowField, len(fldDescs)),
+ &fieldStack,
+ )
+ for i, f := range fields {
+ if f.path == nil {
+ return nil, fmt.Errorf(
+ "struct doesn't have corresponding row field %s",
+ fldDescs[i].Name,
+ )
+ }
+ }
+
+ fieldsIface, _ := namedStructFieldMap.LoadOrStore(
+ key,
+ &namedStructFields{fields: fields, missingField: missingField},
+ )
+ return fieldsIface.(*namedStructFields), nil
+}
+
+func joinFieldNames(fldDescs []pgconn.FieldDescription) string {
+ switch len(fldDescs) {
+ case 0:
+ return ""
+ case 1:
+ return fldDescs[0].Name
+ }
+
+ totalSize := len(fldDescs) - 1 // Space for separator bytes.
+ for _, d := range fldDescs {
+ totalSize += len(d.Name)
+ }
+ var b strings.Builder
+ b.Grow(totalSize)
+ b.WriteString(fldDescs[0].Name)
+ for _, d := range fldDescs[1:] {
+ b.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character.
+ b.WriteString(d.Name)
+ }
+ return b.String()
+}
+
+func computeNamedStructFields(
+ fldDescs []pgconn.FieldDescription,
+ t reflect.Type,
+ fields []structRowField,
+ fieldStack *[]int,
+) ([]structRowField, string) {
+ var missingField string
+ tail := len(*fieldStack)
+ *fieldStack = append(*fieldStack, 0)
+ for i := 0; i < t.NumField(); i++ {
+ sf := t.Field(i)
+ (*fieldStack)[tail] = i
+ if sf.PkgPath != "" && !sf.Anonymous {
+ // Field is unexported, skip it.
+ continue
+ }
+ // Handle anonymous struct embedding, but do not try to handle embedded pointers.
+ if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
+ var missingSubField string
+ fields, missingSubField = computeNamedStructFields(
+ fldDescs,
+ sf.Type,
+ fields,
+ fieldStack,
+ )
+ if missingField == "" {
+ missingField = missingSubField
+ }
+ } else {
+ dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
+ if dbTagPresent {
+ dbTag, _, _ = strings.Cut(dbTag, ",")
+ }
+ if dbTag == "-" {
+ // Field is ignored, skip it.
+ continue
+ }
+ colName := dbTag
+ if !dbTagPresent {
+ colName = sf.Name
+ }
+ fpos := fieldPosByName(fldDescs, colName, !dbTagPresent)
+ if fpos == -1 {
+ if missingField == "" {
+ missingField = colName
+ }
+ continue
+ }
+ fields[fpos] = structRowField{
+ path: append([]int(nil), *fieldStack...),
+ }
+ }
+ }
+ *fieldStack = (*fieldStack)[:tail]
+
+ return fields, missingField
+}
+
+const structTagKey = "db"
+
+func fieldPosByName(fldDescs []pgconn.FieldDescription, field string, normalize bool) (i int) {
+ i = -1
+
+ if normalize {
+ field = strings.ReplaceAll(field, "_", "")
+ }
+ for i, desc := range fldDescs {
+ if normalize {
+ if strings.EqualFold(strings.ReplaceAll(desc.Name, "_", ""), field) {
+ return i
+ }
+ } else {
+ if desc.Name == field {
+ return i
+ }
+ }
+ }
+ return
+}
+
+// structRowField describes a field of a struct.
+//
+// TODO: It would be a bit more efficient to track the path using the pointer
+// offset within the (outermost) struct and use unsafe.Pointer arithmetic to
+// construct references when scanning rows. However, it's not clear it's worth
+// using unsafe for this.
+type structRowField struct {
+ path []int
+}
+
+func setupStructScanTargets(receiver any, fields []structRowField) []any {
+ scanTargets := make([]any, len(fields))
+ v := reflect.ValueOf(receiver).Elem()
+ for i, f := range fields {
+ scanTargets[i] = v.FieldByIndex(f.path).Addr().Interface()
+ }
+ return scanTargets
+}
diff --git a/vendor/github.com/jackc/pgx/v5/tracer.go b/vendor/github.com/jackc/pgx/v5/tracer.go
new file mode 100644
index 0000000..58ca99f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/tracer.go
@@ -0,0 +1,107 @@
+package pgx
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// QueryTracer traces Query, QueryRow, and Exec.
+type QueryTracer interface {
+ // TraceQueryStart is called at the beginning of Query, QueryRow, and Exec calls. The returned context is used for the
+ // rest of the call and will be passed to TraceQueryEnd.
+ TraceQueryStart(ctx context.Context, conn *Conn, data TraceQueryStartData) context.Context
+
+ TraceQueryEnd(ctx context.Context, conn *Conn, data TraceQueryEndData)
+}
+
+type TraceQueryStartData struct {
+ SQL string
+ Args []any
+}
+
+type TraceQueryEndData struct {
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+// BatchTracer traces SendBatch.
+type BatchTracer interface {
+ // TraceBatchStart is called at the beginning of SendBatch calls. The returned context is used for the
+ // rest of the call and will be passed to TraceBatchQuery and TraceBatchEnd.
+ TraceBatchStart(ctx context.Context, conn *Conn, data TraceBatchStartData) context.Context
+
+ TraceBatchQuery(ctx context.Context, conn *Conn, data TraceBatchQueryData)
+ TraceBatchEnd(ctx context.Context, conn *Conn, data TraceBatchEndData)
+}
+
+type TraceBatchStartData struct {
+ Batch *Batch
+}
+
+type TraceBatchQueryData struct {
+ SQL string
+ Args []any
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+type TraceBatchEndData struct {
+ Err error
+}
+
+// CopyFromTracer traces CopyFrom.
+type CopyFromTracer interface {
+ // TraceCopyFromStart is called at the beginning of CopyFrom calls. The returned context is used for the
+ // rest of the call and will be passed to TraceCopyFromEnd.
+ TraceCopyFromStart(ctx context.Context, conn *Conn, data TraceCopyFromStartData) context.Context
+
+ TraceCopyFromEnd(ctx context.Context, conn *Conn, data TraceCopyFromEndData)
+}
+
+type TraceCopyFromStartData struct {
+ TableName Identifier
+ ColumnNames []string
+}
+
+type TraceCopyFromEndData struct {
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+// PrepareTracer traces Prepare.
+type PrepareTracer interface {
+ // TracePrepareStart is called at the beginning of Prepare calls. The returned context is used for the
+ // rest of the call and will be passed to TracePrepareEnd.
+ TracePrepareStart(ctx context.Context, conn *Conn, data TracePrepareStartData) context.Context
+
+ TracePrepareEnd(ctx context.Context, conn *Conn, data TracePrepareEndData)
+}
+
+type TracePrepareStartData struct {
+ Name string
+ SQL string
+}
+
+type TracePrepareEndData struct {
+ AlreadyPrepared bool
+ Err error
+}
+
+// ConnectTracer traces Connect and ConnectConfig.
+type ConnectTracer interface {
+ // TraceConnectStart is called at the beginning of Connect and ConnectConfig calls. The returned context is used for
+ // the rest of the call and will be passed to TraceConnectEnd.
+ TraceConnectStart(ctx context.Context, data TraceConnectStartData) context.Context
+
+ TraceConnectEnd(ctx context.Context, data TraceConnectEndData)
+}
+
+type TraceConnectStartData struct {
+ ConnConfig *ConnConfig
+}
+
+type TraceConnectEndData struct {
+ Conn *Conn
+ Err error
+}
diff --git a/vendor/github.com/jackc/pgx/v5/tx.go b/vendor/github.com/jackc/pgx/v5/tx.go
new file mode 100644
index 0000000..571e5e0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/tx.go
@@ -0,0 +1,442 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// TxIsoLevel is the transaction isolation level (serializable, repeatable read, read committed or read uncommitted)
+type TxIsoLevel string
+
+// Transaction isolation levels
+const (
+ Serializable TxIsoLevel = "serializable"
+ RepeatableRead TxIsoLevel = "repeatable read"
+ ReadCommitted TxIsoLevel = "read committed"
+ ReadUncommitted TxIsoLevel = "read uncommitted"
+)
+
+// TxAccessMode is the transaction access mode (read write or read only)
+type TxAccessMode string
+
+// Transaction access modes
+const (
+ ReadWrite TxAccessMode = "read write"
+ ReadOnly TxAccessMode = "read only"
+)
+
+// TxDeferrableMode is the transaction deferrable mode (deferrable or not deferrable)
+type TxDeferrableMode string
+
+// Transaction deferrable modes
+const (
+ Deferrable TxDeferrableMode = "deferrable"
+ NotDeferrable TxDeferrableMode = "not deferrable"
+)
+
+// TxOptions are transaction modes within a transaction block
+type TxOptions struct {
+ IsoLevel TxIsoLevel
+ AccessMode TxAccessMode
+ DeferrableMode TxDeferrableMode
+
+ // BeginQuery is the SQL query that will be executed to begin the transaction. This allows using non-standard syntax
+	// such as BEGIN PRIORITY HIGH with CockroachDB. If set, this will override the other settings.
+ BeginQuery string
+ // CommitQuery is the SQL query that will be executed to commit the transaction.
+ CommitQuery string
+}
+
+var emptyTxOptions TxOptions
+
+func (txOptions TxOptions) beginSQL() string {
+ if txOptions == emptyTxOptions {
+ return "begin"
+ }
+
+ if txOptions.BeginQuery != "" {
+ return txOptions.BeginQuery
+ }
+
+ var buf strings.Builder
+ buf.Grow(64) // 64 - maximum length of string with available options
+ buf.WriteString("begin")
+
+ if txOptions.IsoLevel != "" {
+ buf.WriteString(" isolation level ")
+ buf.WriteString(string(txOptions.IsoLevel))
+ }
+ if txOptions.AccessMode != "" {
+ buf.WriteByte(' ')
+ buf.WriteString(string(txOptions.AccessMode))
+ }
+ if txOptions.DeferrableMode != "" {
+ buf.WriteByte(' ')
+ buf.WriteString(string(txOptions.DeferrableMode))
+ }
+
+ return buf.String()
+}
+
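+// For illustration, the zero value of TxOptions produces plain "begin", while
+// TxOptions{IsoLevel: Serializable, AccessMode: ReadOnly} produces
+// "begin isolation level serializable read only"; BeginQuery, when set, replaces the generated
+// statement entirely. A minimal call, assuming a *Conn named conn:
+//
+//	tx, err := conn.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.Serializable, AccessMode: pgx.ReadOnly})
+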
+var ErrTxClosed = errors.New("tx is closed")
+
+// ErrTxCommitRollback occurs when an error has occurred in a transaction and
+// Commit() is called. PostgreSQL accepts COMMIT on aborted transactions, but
+// it is treated as ROLLBACK.
+var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
+
+// Begin starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
+// auto-rollback on context cancellation.
+func (c *Conn) Begin(ctx context.Context) (Tx, error) {
+ return c.BeginTx(ctx, TxOptions{})
+}
+
+// BeginTx starts a transaction with txOptions determining the transaction mode. Unlike database/sql, the context only
+// affects the begin command. i.e. there is no auto-rollback on context cancellation.
+func (c *Conn) BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error) {
+ _, err := c.Exec(ctx, txOptions.beginSQL())
+ if err != nil {
+ // begin should never fail unless there is an underlying connection issue or
+ // a context timeout. In either case, the connection is possibly broken.
+ c.die()
+ return nil, err
+ }
+
+ return &dbTx{
+ conn: c,
+ commitQuery: txOptions.CommitQuery,
+ }, nil
+}
+
+// Tx represents a database transaction.
+//
+// Tx is an interface instead of a struct to enable connection pools to be implemented without relying on internal pgx
+// state, to support pseudo-nested transactions with savepoints, and to allow tests to mock transactions. However,
+// adding a method to an interface is technically a breaking change. If new methods are added to Conn it may be
+// desirable to add them to Tx as well. Because of this the Tx interface is partially excluded from semantic version
+// requirements. Methods will not be removed or changed, but new methods may be added.
+type Tx interface {
+ // Begin starts a pseudo nested transaction.
+ Begin(ctx context.Context) (Tx, error)
+
+ // Commit commits the transaction if this is a real transaction or releases the savepoint if this is a pseudo nested
+ // transaction. Commit will return an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is
+ // otherwise safe to call multiple times. If the commit fails with a rollback status (e.g. the transaction was already
+ // in a broken state) then an error where errors.Is(ErrTxCommitRollback) is true will be returned.
+ Commit(ctx context.Context) error
+
+ // Rollback rolls back the transaction if this is a real transaction or rolls back to the savepoint if this is a
+ // pseudo nested transaction. Rollback will return an error where errors.Is(ErrTxClosed) is true if the Tx is already
+ // closed, but is otherwise safe to call multiple times. Hence, a defer tx.Rollback() is safe even if tx.Commit() will
+ // be called first in a non-error condition. Any other failure of a real transaction will result in the connection
+ // being closed.
+ Rollback(ctx context.Context) error
+
+ CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error)
+ SendBatch(ctx context.Context, b *Batch) BatchResults
+ LargeObjects() LargeObjects
+
+ Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error)
+
+ Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error)
+ Query(ctx context.Context, sql string, args ...any) (Rows, error)
+ QueryRow(ctx context.Context, sql string, args ...any) Row
+
+ // Conn returns the underlying *Conn on which this transaction is executing.
+ Conn() *Conn
+}
+
+// dbTx represents a database transaction.
+//
+// All dbTx methods return ErrTxClosed if Commit or Rollback has already been
+// called on the dbTx.
+type dbTx struct {
+ conn *Conn
+ savepointNum int64
+ closed bool
+ commitQuery string
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (tx *dbTx) Begin(ctx context.Context) (Tx, error) {
+ if tx.closed {
+ return nil, ErrTxClosed
+ }
+
+ tx.savepointNum++
+ _, err := tx.conn.Exec(ctx, "savepoint sp_"+strconv.FormatInt(tx.savepointNum, 10))
+ if err != nil {
+ return nil, err
+ }
+
+ return &dbSimulatedNestedTx{tx: tx, savepointNum: tx.savepointNum}, nil
+}
+
+// Commit commits the transaction.
+func (tx *dbTx) Commit(ctx context.Context) error {
+ if tx.closed {
+ return ErrTxClosed
+ }
+
+ commandSQL := "commit"
+ if tx.commitQuery != "" {
+ commandSQL = tx.commitQuery
+ }
+
+ commandTag, err := tx.conn.Exec(ctx, commandSQL)
+ tx.closed = true
+ if err != nil {
+ if tx.conn.PgConn().TxStatus() != 'I' {
+ _ = tx.conn.Close(ctx) // already have error to return
+ }
+ return err
+ }
+ if commandTag.String() == "ROLLBACK" {
+ return ErrTxCommitRollback
+ }
+
+ return nil
+}
+
+// Rollback rolls back the transaction. Rollback will return ErrTxClosed if the
+// Tx is already closed, but is otherwise safe to call multiple times. Hence, a
+// defer tx.Rollback() is safe even if tx.Commit() will be called first in a
+// non-error condition.
+func (tx *dbTx) Rollback(ctx context.Context) error {
+ if tx.closed {
+ return ErrTxClosed
+ }
+
+ _, err := tx.conn.Exec(ctx, "rollback")
+ tx.closed = true
+ if err != nil {
+ // A rollback failure leaves the connection in an undefined state
+ tx.conn.die()
+ return err
+ }
+
+ return nil
+}
+
+// Exec delegates to the underlying *Conn
+func (tx *dbTx) Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ if tx.closed {
+ return pgconn.CommandTag{}, ErrTxClosed
+ }
+
+ return tx.conn.Exec(ctx, sql, arguments...)
+}
+
+// Prepare delegates to the underlying *Conn
+func (tx *dbTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ if tx.closed {
+ return nil, ErrTxClosed
+ }
+
+ return tx.conn.Prepare(ctx, name, sql)
+}
+
+// Query delegates to the underlying *Conn
+func (tx *dbTx) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if tx.closed {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ err := ErrTxClosed
+ return &baseRows{closed: true, err: err}, err
+ }
+
+ return tx.conn.Query(ctx, sql, args...)
+}
+
+// QueryRow delegates to the underlying *Conn
+func (tx *dbTx) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := tx.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// CopyFrom delegates to the underlying *Conn
+func (tx *dbTx) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ if tx.closed {
+ return 0, ErrTxClosed
+ }
+
+ return tx.conn.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// SendBatch delegates to the underlying *Conn
+func (tx *dbTx) SendBatch(ctx context.Context, b *Batch) BatchResults {
+ if tx.closed {
+ return &batchResults{err: ErrTxClosed}
+ }
+
+ return tx.conn.SendBatch(ctx, b)
+}
+
+// LargeObjects returns a LargeObjects instance for the transaction.
+func (tx *dbTx) LargeObjects() LargeObjects {
+ return LargeObjects{tx: tx}
+}
+
+func (tx *dbTx) Conn() *Conn {
+ return tx.conn
+}
+
+// dbSimulatedNestedTx represents a simulated nested transaction implemented by a savepoint.
+type dbSimulatedNestedTx struct {
+ tx Tx
+ savepointNum int64
+ closed bool
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (sp *dbSimulatedNestedTx) Begin(ctx context.Context) (Tx, error) {
+ if sp.closed {
+ return nil, ErrTxClosed
+ }
+
+ return sp.tx.Begin(ctx)
+}
+
+// Commit releases the savepoint, essentially committing the pseudo nested transaction.
+func (sp *dbSimulatedNestedTx) Commit(ctx context.Context) error {
+ if sp.closed {
+ return ErrTxClosed
+ }
+
+ _, err := sp.Exec(ctx, "release savepoint sp_"+strconv.FormatInt(sp.savepointNum, 10))
+ sp.closed = true
+ return err
+}
+
+// Rollback rolls back to the savepoint, essentially rolling back the pseudo nested transaction. Rollback will return
+// ErrTxClosed if the dbSimulatedNestedTx is already closed, but is otherwise safe to call multiple times. Hence, a defer
+// sp.Rollback() is safe even if sp.Commit() will be called first in a non-error condition.
+func (sp *dbSimulatedNestedTx) Rollback(ctx context.Context) error {
+ if sp.closed {
+ return ErrTxClosed
+ }
+
+ _, err := sp.Exec(ctx, "rollback to savepoint sp_"+strconv.FormatInt(sp.savepointNum, 10))
+ sp.closed = true
+ return err
+}
+
+// Exec delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ if sp.closed {
+ return pgconn.CommandTag{}, ErrTxClosed
+ }
+
+ return sp.tx.Exec(ctx, sql, arguments...)
+}
+
+// Prepare delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ if sp.closed {
+ return nil, ErrTxClosed
+ }
+
+ return sp.tx.Prepare(ctx, name, sql)
+}
+
+// Query delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if sp.closed {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ err := ErrTxClosed
+ return &baseRows{closed: true, err: err}, err
+ }
+
+ return sp.tx.Query(ctx, sql, args...)
+}
+
+// QueryRow delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := sp.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// CopyFrom delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ if sp.closed {
+ return 0, ErrTxClosed
+ }
+
+ return sp.tx.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// SendBatch delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) SendBatch(ctx context.Context, b *Batch) BatchResults {
+ if sp.closed {
+ return &batchResults{err: ErrTxClosed}
+ }
+
+ return sp.tx.SendBatch(ctx, b)
+}
+
+func (sp *dbSimulatedNestedTx) LargeObjects() LargeObjects {
+ return LargeObjects{tx: sp}
+}
+
+func (sp *dbSimulatedNestedTx) Conn() *Conn {
+ return sp.tx.Conn()
+}
+
+// BeginFunc calls Begin on db and then calls fn. If fn does not return an error then it calls Commit on db. If fn
+// returns an error it calls Rollback on db. The context will be used when executing the transaction control statements
+// (BEGIN, ROLLBACK, and COMMIT) but does not otherwise affect the execution of fn.
+func BeginFunc(
+ ctx context.Context,
+ db interface {
+ Begin(ctx context.Context) (Tx, error)
+ },
+ fn func(Tx) error,
+) (err error) {
+ var tx Tx
+ tx, err = db.Begin(ctx)
+ if err != nil {
+ return err
+ }
+
+ return beginFuncExec(ctx, tx, fn)
+}
+
+// BeginTxFunc calls BeginTx on db and then calls fn. If fn does not return an error then it calls Commit on db. If fn
+// returns an error it calls Rollback on db. The context will be used when executing the transaction control statements
+// (BEGIN, ROLLBACK, and COMMIT) but does not otherwise affect the execution of fn.
+func BeginTxFunc(
+ ctx context.Context,
+ db interface {
+ BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error)
+ },
+ txOptions TxOptions,
+ fn func(Tx) error,
+) (err error) {
+ var tx Tx
+ tx, err = db.BeginTx(ctx, txOptions)
+ if err != nil {
+ return err
+ }
+
+ return beginFuncExec(ctx, tx, fn)
+}
+
+func beginFuncExec(ctx context.Context, tx Tx, fn func(Tx) error) (err error) {
+ defer func() {
+ rollbackErr := tx.Rollback(ctx)
+ if rollbackErr != nil && !errors.Is(rollbackErr, ErrTxClosed) {
+ err = rollbackErr
+ }
+ }()
+
+ fErr := fn(tx)
+ if fErr != nil {
+ _ = tx.Rollback(ctx) // ignore rollback error as there is already an error to return
+ return fErr
+ }
+
+ return tx.Commit(ctx)
+}
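+
+// The following usage sketch is illustrative only (it is not part of the upstream
+// file); the conn value, table name, and arguments are assumed for the example:
+//
+//	err := pgx.BeginTxFunc(ctx, conn, pgx.TxOptions{IsoLevel: pgx.Serializable}, func(tx pgx.Tx) error {
+//		_, execErr := tx.Exec(ctx, "insert into accounts(id, balance) values ($1, $2)", 1, 100)
+//		return execErr
+//	})
+//	// A nil return from fn commits the transaction; an error (or a panic) rolls it back.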
diff --git a/vendor/github.com/jackc/pgx/v5/values.go b/vendor/github.com/jackc/pgx/v5/values.go
new file mode 100644
index 0000000..6e2ff30
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/values.go
@@ -0,0 +1,63 @@
+package pgx
+
+import (
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// PostgreSQL format codes
+const (
+ TextFormatCode = 0
+ BinaryFormatCode = 1
+)
+
+func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) {
+ buf, err := m.Encode(0, TextFormatCode, arg, []byte{})
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+ return string(buf), nil
+}
+
+func encodeCopyValue(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := m.Encode(oid, BinaryFormatCode, arg, buf)
+ if err != nil {
+ if argBuf2, err2 := tryScanStringCopyValueThenEncode(m, buf, oid, arg); err2 == nil {
+ argBuf = argBuf2
+ } else {
+ return nil, err
+ }
+ }
+
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ return buf, nil
+}
+
+func tryScanStringCopyValueThenEncode(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
+ s, ok := arg.(string)
+ if !ok {
+ textBuf, err := m.Encode(oid, TextFormatCode, arg, nil)
+ if err != nil {
+ return nil, errors.New("not a string and cannot be encoded as text")
+ }
+ s = string(textBuf)
+ }
+
+ var v any
+ err := m.Scan(oid, TextFormatCode, []byte(s), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.Encode(oid, BinaryFormatCode, v, buf)
+}
diff --git a/vendor/github.com/jackc/puddle/v2/CHANGELOG.md b/vendor/github.com/jackc/puddle/v2/CHANGELOG.md
new file mode 100644
index 0000000..d0d202c
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/CHANGELOG.md
@@ -0,0 +1,79 @@
+# 2.2.2 (September 10, 2024)
+
+* Add empty acquire time to stats (Maxim Ivanov)
+* Stop importing nanotime from runtime via linkname (maypok86)
+
+# 2.2.1 (July 15, 2023)
+
+* Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,
+ CreateResource could create a resource even if the pool was full. This could cause the pool to overflow. While this
+ was documented, it was documenting incorrect behavior. CreateResource now returns an error if the pool is full.
+
+# 2.2.0 (February 11, 2023)
+
+* Use Go 1.19 atomics and drop go.uber.org/atomic dependency
+
+# 2.1.2 (November 12, 2022)
+
+* Restore support to Go 1.18 via go.uber.org/atomic
+
+# 2.1.1 (November 11, 2022)
+
+* Fix create resource concurrently with Stat call race
+
+# 2.1.0 (October 28, 2022)
+
+* Concurrency control is now implemented with a semaphore. This simplifies some internal logic, resolves a few error conditions (including a deadlock), and improves performance. (Jan Dubsky)
+* Go 1.19 is now required for the improved atomic support.
+
+# 2.0.1 (October 28, 2022)
+
+* Fix race condition when Close is called concurrently with multiple constructors
+
+# 2.0.0 (September 17, 2022)
+
+* Use generics instead of interface{} (Столяров Владимир Алексеевич)
+* Add Reset
+* Do not cancel resource construction when Acquire is canceled
+* NewPool takes Config
+
+# 1.3.0 (August 27, 2022)
+
+* Acquire creates resources in background to allow creation to continue after Acquire is canceled (James Hartig)
+
+# 1.2.1 (December 2, 2021)
+
+* TryAcquire now does not block when background constructing resource
+
+# 1.2.0 (November 20, 2021)
+
+* Add TryAcquire (A. Jensen)
+* Fix: remove memory leak / unintentionally pinned memory when shrinking slices (Alexander Staubo)
+* Fix: Do not leave pool locked after panic from nil context
+
+# 1.1.4 (September 11, 2021)
+
+* Fix: Deadlock in CreateResource if pool was closed during resource acquisition (Dmitriy Matrenichev)
+
+# 1.1.3 (December 3, 2020)
+
+* Fix: Failed resource creation could cause concurrent Acquire to hang. (Evgeny Vanslov)
+
+# 1.1.2 (September 26, 2020)
+
+* Fix: Resource.Destroy no longer removes itself from the pool before its destructor has completed.
+* Fix: Prevent crash when pool is closed while resource is being created.
+
+# 1.1.1 (April 2, 2020)
+
+* Pool.Close can be safely called multiple times
+* AcquireAllIdle immediately returns nil if pool is closed
+* CreateResource checks if pool is closed before taking any action
+* Fix potential race condition when CreateResource and Close are called concurrently. CreateResource now checks if pool is closed before adding newly created resource to pool.
+
+# 1.1.0 (February 5, 2020)
+
+* Use runtime.nanotime for faster tracking of acquire time and last usage time.
+* Track resource idle time to enable client health check logic. (Patrick Ellul)
+* Add CreateResource to construct a new resource without acquiring it. (Patrick Ellul)
+* Fix deadlock race when acquire is cancelled. (Michael Tharp)
diff --git a/vendor/github.com/jackc/puddle/v2/LICENSE b/vendor/github.com/jackc/puddle/v2/LICENSE
new file mode 100644
index 0000000..bcc286c
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2018 Jack Christensen
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jackc/puddle/v2/README.md b/vendor/github.com/jackc/puddle/v2/README.md
new file mode 100644
index 0000000..fa82a9d
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/README.md
@@ -0,0 +1,80 @@
+[](https://pkg.go.dev/github.com/jackc/puddle/v2)
+
+
+# Puddle
+
+Puddle is a tiny generic resource pool library for Go that uses the standard
+context library to signal cancellation of acquires. It is designed to contain
+the minimum functionality required for a resource pool. It can be used directly
+or it can be used as the base for a domain specific resource pool. For example,
+a database connection pool may use puddle internally and implement health checks
+and keep-alive behavior without needing to implement any concurrent code of its
+own.
+
+## Features
+
+* Acquire cancellation via context standard library
+* Statistics API for monitoring pool pressure
+* No dependencies outside of standard library and golang.org/x/sync
+* High performance
+* 100% test coverage of reachable code
+
+## Example Usage
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+ "net"
+
+ "github.com/jackc/puddle/v2"
+)
+
+func main() {
+ constructor := func(context.Context) (net.Conn, error) {
+ return net.Dial("tcp", "127.0.0.1:8080")
+ }
+ destructor := func(value net.Conn) {
+ value.Close()
+ }
+ maxPoolSize := int32(10)
+
+ pool, err := puddle.NewPool(&puddle.Config[net.Conn]{Constructor: constructor, Destructor: destructor, MaxSize: maxPoolSize})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Acquire resource from the pool.
+ res, err := pool.Acquire(context.Background())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Use resource.
+ _, err = res.Value().Write([]byte{1})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Release when done.
+ res.Release()
+}
+```
+
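+The `Stat` method returns a snapshot that can be polled to monitor pool pressure. A
+minimal sketch, continuing from the pool created in the example above:
+
+```go
+stat := pool.Stat()
+log.Printf("total=%d acquired=%d idle=%d constructing=%d",
+	stat.TotalResources(), stat.AcquiredResources(), stat.IdleResources(), stat.ConstructingResources())
+```
+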
+## Status
+
+Puddle is stable and feature complete.
+
+* Bug reports and fixes are welcome.
+* New features will usually not be accepted if they can be feasibly implemented in a wrapper.
+* Performance optimizations will usually not be accepted unless the performance issue rises to the level of a bug.
+
+## Supported Go Versions
+
+puddle supports the same versions of Go that are supported by the Go project. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases. This means puddle supports Go 1.19 and higher.
+
+## License
+
+MIT
diff --git a/vendor/github.com/jackc/puddle/v2/context.go b/vendor/github.com/jackc/puddle/v2/context.go
new file mode 100644
index 0000000..e19d2a6
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/context.go
@@ -0,0 +1,24 @@
+package puddle
+
+import (
+ "context"
+ "time"
+)
+
+// valueCancelCtx combines two contexts into one. One context is used for values and the other is used for cancellation.
+type valueCancelCtx struct {
+ valueCtx context.Context
+ cancelCtx context.Context
+}
+
+func (ctx *valueCancelCtx) Deadline() (time.Time, bool) { return ctx.cancelCtx.Deadline() }
+func (ctx *valueCancelCtx) Done() <-chan struct{} { return ctx.cancelCtx.Done() }
+func (ctx *valueCancelCtx) Err() error { return ctx.cancelCtx.Err() }
+func (ctx *valueCancelCtx) Value(key any) any { return ctx.valueCtx.Value(key) }
+
+func newValueCancelCtx(valueCtx, cancelContext context.Context) context.Context {
+ return &valueCancelCtx{
+ valueCtx: valueCtx,
+ cancelCtx: cancelContext,
+ }
+}
diff --git a/vendor/github.com/jackc/puddle/v2/doc.go b/vendor/github.com/jackc/puddle/v2/doc.go
new file mode 100644
index 0000000..818e4a6
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/doc.go
@@ -0,0 +1,11 @@
+// Package puddle is a generic resource pool with type-parametrized api.
+/*
+
+Puddle is a tiny generic resource pool library for Go that uses the standard
+context library to signal cancellation of acquires. It is designed to contain
+the minimum functionality a resource pool needs that cannot be implemented
+without concurrency concerns. For example, a database connection pool may use
+puddle internally and implement health checks and keep-alive behavior without
+needing to implement any concurrent code of its own.
+*/
+package puddle
diff --git a/vendor/github.com/jackc/puddle/v2/internal/genstack/gen_stack.go b/vendor/github.com/jackc/puddle/v2/internal/genstack/gen_stack.go
new file mode 100644
index 0000000..7e4660c
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/internal/genstack/gen_stack.go
@@ -0,0 +1,85 @@
+package genstack
+
+// GenStack implements a generational stack.
+//
+// GenStack works as common stack except for the fact that all elements in the
+// older generation are guaranteed to be popped before any element in the newer
+// generation. New elements are always pushed to the current (newest)
+// generation.
+//
+// We could also say that GenStack behaves as a stack in case of a single
+// generation, but it behaves as a queue of individual generation stacks.
+type GenStack[T any] struct {
+ // We can represent an arbitrary number of generations using 2 stacks. The
+ // new stack stores all new pushes and the old stack serves all reads.
+ // Old stack can represent multiple generations. If old == new, then all
+ // elements pushed in previous (not current) generations have already
+ // been popped.
+
+ old *stack[T]
+ new *stack[T]
+}
+
+// NewGenStack creates a new empty GenStack.
+func NewGenStack[T any]() *GenStack[T] {
+ s := &stack[T]{}
+ return &GenStack[T]{
+ old: s,
+ new: s,
+ }
+}
+
+func (s *GenStack[T]) Pop() (T, bool) {
+ // Pushes always append to the new stack, so if the old one becomes
+ // empty, it will remain empty forever.
+ if s.old.len() == 0 && s.old != s.new {
+ s.old = s.new
+ }
+
+ if s.old.len() == 0 {
+ var zero T
+ return zero, false
+ }
+
+ return s.old.pop(), true
+}
+
+// Push pushes a new element at the top of the stack.
+func (s *GenStack[T]) Push(v T) { s.new.push(v) }
+
+// NextGen starts a new stack generation.
+func (s *GenStack[T]) NextGen() {
+ if s.old == s.new {
+ s.new = &stack[T]{}
+ return
+ }
+
+ // We need to pop from the old stack to the top of the new stack. Let's
+ // have an example:
+ //
+ // Old: 4 3 2 1
+ // New: 8 7 6 5
+ // PopOrder: 1 2 3 4 5 6 7 8
+ //
+ //
+ // To preserve pop order, we have to take all elements from the old
+ // stack and push them to the top of new stack:
+ //
+ // New: 8 7 6 5 4 3 2 1
+ //
+ s.new.push(s.old.takeAll()...)
+
+ // We have the old stack allocated and empty, so why not reuse it as
+ // the new stack.
+ s.old, s.new = s.new, s.old
+}
+
+// Len returns number of elements in the stack.
+func (s *GenStack[T]) Len() int {
+ l := s.old.len()
+ if s.old != s.new {
+ l += s.new.len()
+ }
+
+ return l
+}
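+
+// Illustrative sketch of the resulting pop order (not part of the upstream file; the
+// values are arbitrary). Older generations are popped before newer ones, LIFO within
+// a generation:
+//
+//	s := NewGenStack[int]()
+//	s.Push(1)
+//	s.Push(2)
+//	s.NextGen()
+//	s.Push(3)
+//	s.Pop() // 2, true
+//	s.Pop() // 1, true
+//	s.Pop() // 3, true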
diff --git a/vendor/github.com/jackc/puddle/v2/internal/genstack/stack.go b/vendor/github.com/jackc/puddle/v2/internal/genstack/stack.go
new file mode 100644
index 0000000..dbced0c
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/internal/genstack/stack.go
@@ -0,0 +1,39 @@
+package genstack
+
+// stack is a wrapper around an array implementing a stack.
+//
+// We cannot use slice to represent the stack because append might change the
+// pointer value of the slice. That would be an issue in GenStack
+// implementation.
+type stack[T any] struct {
+ arr []T
+}
+
+// push pushes a new element at the top of a stack.
+func (s *stack[T]) push(vs ...T) { s.arr = append(s.arr, vs...) }
+
+// pop pops the stack top-most element.
+//
+// If stack length is zero, this method panics.
+func (s *stack[T]) pop() T {
+ idx := s.len() - 1
+ val := s.arr[idx]
+
+ // Avoid memory leak
+ var zero T
+ s.arr[idx] = zero
+
+ s.arr = s.arr[:idx]
+ return val
+}
+
+// takeAll returns all elements in the stack in order as they are stored - i.e.
+// the top-most stack element is the last one.
+func (s *stack[T]) takeAll() []T {
+ arr := s.arr
+ s.arr = nil
+ return arr
+}
+
+// len returns number of elements in the stack.
+func (s *stack[T]) len() int { return len(s.arr) }
diff --git a/vendor/github.com/jackc/puddle/v2/log.go b/vendor/github.com/jackc/puddle/v2/log.go
new file mode 100644
index 0000000..b21b946
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/log.go
@@ -0,0 +1,32 @@
+package puddle
+
+import "unsafe"
+
+type ints interface {
+ int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
+
+// log2Int returns log2 of an integer. This function panics if val < 0. For val
+// == 0, returns 0.
+func log2Int[T ints](val T) uint8 {
+ if val <= 0 {
+ panic("log2 of non-positive number does not exist")
+ }
+
+ return log2IntRange(val, 0, uint8(8*unsafe.Sizeof(val)))
+}
+
+func log2IntRange[T ints](val T, begin, end uint8) uint8 {
+ length := end - begin
+ if length == 1 {
+ return begin
+ }
+
+ delim := begin + length/2
+ mask := T(1) << delim
+ if mask > val {
+ return log2IntRange(val, begin, delim)
+ } else {
+ return log2IntRange(val, delim, end)
+ }
+}
diff --git a/vendor/github.com/jackc/puddle/v2/nanotime.go b/vendor/github.com/jackc/puddle/v2/nanotime.go
new file mode 100644
index 0000000..8a5351a
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/nanotime.go
@@ -0,0 +1,16 @@
+package puddle
+
+import "time"
+
+// nanotime returns the time in nanoseconds since process start.
+//
+// This approach, described at
+// https://github.com/golang/go/issues/61765#issuecomment-1672090302,
+// is fast, monotonic, and portable, and avoids the previous
+// dependence on runtime.nanotime using the (unsafe) linkname hack.
+// In particular, time.Since does less work than time.Now.
+func nanotime() int64 {
+ return time.Since(globalStart).Nanoseconds()
+}
+
+var globalStart = time.Now()
diff --git a/vendor/github.com/jackc/puddle/v2/pool.go b/vendor/github.com/jackc/puddle/v2/pool.go
new file mode 100644
index 0000000..c411d2f
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/pool.go
@@ -0,0 +1,710 @@
+package puddle
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/jackc/puddle/v2/internal/genstack"
+ "golang.org/x/sync/semaphore"
+)
+
+const (
+ resourceStatusConstructing = 0
+ resourceStatusIdle = iota
+ resourceStatusAcquired = iota
+ resourceStatusHijacked = iota
+)
+
+// ErrClosedPool occurs on an attempt to acquire a connection from a closed pool
+// or a pool that is closed while the acquire is waiting.
+var ErrClosedPool = errors.New("closed pool")
+
+// ErrNotAvailable occurs on an attempt to acquire a resource from a pool
+// that is at maximum capacity and has no available resources.
+var ErrNotAvailable = errors.New("resource not available")
+
+// Constructor is a function called by the pool to construct a resource.
+type Constructor[T any] func(ctx context.Context) (res T, err error)
+
+// Destructor is a function called by the pool to destroy a resource.
+type Destructor[T any] func(res T)
+
+// Resource is the resource handle returned by acquiring from the pool.
+type Resource[T any] struct {
+ value T
+ pool *Pool[T]
+ creationTime time.Time
+ lastUsedNano int64
+ poolResetCount int
+ status byte
+}
+
+// Value returns the resource value.
+func (res *Resource[T]) Value() T {
+ if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
+ panic("tried to access resource that is not acquired or hijacked")
+ }
+ return res.value
+}
+
+// Release returns the resource to the pool. res must not be subsequently used.
+func (res *Resource[T]) Release() {
+ if res.status != resourceStatusAcquired {
+ panic("tried to release resource that is not acquired")
+ }
+ res.pool.releaseAcquiredResource(res, nanotime())
+}
+
+// ReleaseUnused returns the resource to the pool without updating when it was last used. i.e. LastUsedNanotime
+// will not change. res must not be subsequently used.
+func (res *Resource[T]) ReleaseUnused() {
+ if res.status != resourceStatusAcquired {
+ panic("tried to release resource that is not acquired")
+ }
+ res.pool.releaseAcquiredResource(res, res.lastUsedNano)
+}
+
+// Destroy returns the resource to the pool for destruction. res must not be
+// subsequently used.
+func (res *Resource[T]) Destroy() {
+ if res.status != resourceStatusAcquired {
+ panic("tried to destroy resource that is not acquired")
+ }
+ go res.pool.destroyAcquiredResource(res)
+}
+
+// Hijack assumes ownership of the resource from the pool. Caller is responsible
+// for cleanup of resource value.
+func (res *Resource[T]) Hijack() {
+ if res.status != resourceStatusAcquired {
+ panic("tried to hijack resource that is not acquired")
+ }
+ res.pool.hijackAcquiredResource(res)
+}
+
+// CreationTime returns when the resource was created by the pool.
+func (res *Resource[T]) CreationTime() time.Time {
+ if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
+ panic("tried to access resource that is not acquired or hijacked")
+ }
+ return res.creationTime
+}
+
+// LastUsedNanotime returns when Release was last called on the resource measured in nanoseconds from an arbitrary time
+// (a monotonic time). Returns creation time if Release has never been called. This is only useful to compare with
+// other calls to LastUsedNanotime. In almost all cases, IdleDuration should be used instead.
+func (res *Resource[T]) LastUsedNanotime() int64 {
+ if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
+ panic("tried to access resource that is not acquired or hijacked")
+ }
+
+ return res.lastUsedNano
+}
+
+// IdleDuration returns the duration since Release was last called on the resource. This is equivalent to subtracting
+// LastUsedNanotime from the current nanotime.
+func (res *Resource[T]) IdleDuration() time.Duration {
+ if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
+ panic("tried to access resource that is not acquired or hijacked")
+ }
+
+ return time.Duration(nanotime() - res.lastUsedNano)
+}
+
+// Pool is a concurrency-safe resource pool.
+type Pool[T any] struct {
+ // mux is the pool internal lock. Any modification of the shared state of
+ // the pool (except Acquires of acquireSem) must be performed only by the
+ // holder of the lock. Long running operations are not allowed while mux
+ // is held.
+ mux sync.Mutex
+ // acquireSem provides an allowance to acquire a resource.
+ //
+ // Releases are allowed only when caller holds mux. Acquires have to
+ // happen before mux is locked (doesn't apply to semaphore.TryAcquire in
+ // AcquireAllIdle).
+ acquireSem *semaphore.Weighted
+ destructWG sync.WaitGroup
+
+ allResources resList[T]
+ idleResources *genstack.GenStack[*Resource[T]]
+
+ constructor Constructor[T]
+ destructor Destructor[T]
+ maxSize int32
+
+ acquireCount int64
+ acquireDuration time.Duration
+ emptyAcquireCount int64
+ emptyAcquireWaitTime time.Duration
+ canceledAcquireCount atomic.Int64
+
+ resetCount int
+
+ baseAcquireCtx context.Context
+ cancelBaseAcquireCtx context.CancelFunc
+ closed bool
+}
+
+type Config[T any] struct {
+ Constructor Constructor[T]
+ Destructor Destructor[T]
+ MaxSize int32
+}
+
+// NewPool creates a new pool. Returns an error iff MaxSize is less than 1.
+func NewPool[T any](config *Config[T]) (*Pool[T], error) {
+ if config.MaxSize < 1 {
+ return nil, errors.New("MaxSize must be >= 1")
+ }
+
+ baseAcquireCtx, cancelBaseAcquireCtx := context.WithCancel(context.Background())
+
+ return &Pool[T]{
+ acquireSem: semaphore.NewWeighted(int64(config.MaxSize)),
+ idleResources: genstack.NewGenStack[*Resource[T]](),
+ maxSize: config.MaxSize,
+ constructor: config.Constructor,
+ destructor: config.Destructor,
+ baseAcquireCtx: baseAcquireCtx,
+ cancelBaseAcquireCtx: cancelBaseAcquireCtx,
+ }, nil
+}
+
+// Close destroys all resources in the pool and rejects future Acquire calls.
+// Blocks until all resources are returned to pool and destroyed.
+func (p *Pool[T]) Close() {
+ defer p.destructWG.Wait()
+
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ if p.closed {
+ return
+ }
+ p.closed = true
+ p.cancelBaseAcquireCtx()
+
+ for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
+ p.allResources.remove(res)
+ go p.destructResourceValue(res.value)
+ }
+}
+
+// Stat is a snapshot of Pool statistics.
+type Stat struct {
+ constructingResources int32
+ acquiredResources int32
+ idleResources int32
+ maxResources int32
+ acquireCount int64
+ acquireDuration time.Duration
+ emptyAcquireCount int64
+ emptyAcquireWaitTime time.Duration
+ canceledAcquireCount int64
+}
+
+// TotalResources returns the total number of resources currently in the pool.
+// The value is the sum of ConstructingResources, AcquiredResources, and
+// IdleResources.
+func (s *Stat) TotalResources() int32 {
+ return s.constructingResources + s.acquiredResources + s.idleResources
+}
+
+// ConstructingResources returns the number of resources with construction in progress in
+// the pool.
+func (s *Stat) ConstructingResources() int32 {
+ return s.constructingResources
+}
+
+// AcquiredResources returns the number of currently acquired resources in the pool.
+func (s *Stat) AcquiredResources() int32 {
+ return s.acquiredResources
+}
+
+// IdleResources returns the number of currently idle resources in the pool.
+func (s *Stat) IdleResources() int32 {
+ return s.idleResources
+}
+
+// MaxResources returns the maximum size of the pool.
+func (s *Stat) MaxResources() int32 {
+ return s.maxResources
+}
+
+// AcquireCount returns the cumulative count of successful acquires from the pool.
+func (s *Stat) AcquireCount() int64 {
+ return s.acquireCount
+}
+
+// AcquireDuration returns the total duration of all successful acquires from
+// the pool.
+func (s *Stat) AcquireDuration() time.Duration {
+ return s.acquireDuration
+}
+
+// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
+// that waited for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireCount() int64 {
+ return s.emptyAcquireCount
+}
+
+// EmptyAcquireWaitTime returns the cumulative time waited for successful acquires
+// from the pool for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireWaitTime() time.Duration {
+ return s.emptyAcquireWaitTime
+}
+
+// CanceledAcquireCount returns the cumulative count of acquires from the pool
+// that were canceled by a context.
+func (s *Stat) CanceledAcquireCount() int64 {
+ return s.canceledAcquireCount
+}
+
+// Stat returns the current pool statistics.
+func (p *Pool[T]) Stat() *Stat {
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ s := &Stat{
+ maxResources: p.maxSize,
+ acquireCount: p.acquireCount,
+ emptyAcquireCount: p.emptyAcquireCount,
+ emptyAcquireWaitTime: p.emptyAcquireWaitTime,
+ canceledAcquireCount: p.canceledAcquireCount.Load(),
+ acquireDuration: p.acquireDuration,
+ }
+
+ for _, res := range p.allResources {
+ switch res.status {
+ case resourceStatusConstructing:
+ s.constructingResources += 1
+ case resourceStatusIdle:
+ s.idleResources += 1
+ case resourceStatusAcquired:
+ s.acquiredResources += 1
+ }
+ }
+
+ return s
+}
+
+// tryAcquireIdleResource checks if there is any idle resource. If there is
+// one, this method removes it from the idle list and returns it. If the idle pool
+// is empty, this method returns nil and doesn't modify the idleResources slice.
+//
+// WARNING: Caller of this method must hold the pool mutex!
+func (p *Pool[T]) tryAcquireIdleResource() *Resource[T] {
+ res, ok := p.idleResources.Pop()
+ if !ok {
+ return nil
+ }
+
+ res.status = resourceStatusAcquired
+ return res
+}
+
+// createNewResource creates a new resource and inserts it into the list of pool
+// resources.
+//
+// WARNING: Caller of this method must hold the pool mutex!
+func (p *Pool[T]) createNewResource() *Resource[T] {
+ res := &Resource[T]{
+ pool: p,
+ creationTime: time.Now(),
+ lastUsedNano: nanotime(),
+ poolResetCount: p.resetCount,
+ status: resourceStatusConstructing,
+ }
+
+ p.allResources.append(res)
+ p.destructWG.Add(1)
+
+ return res
+}
+
+// Acquire gets a resource from the pool. If no resources are available and the pool is not at maximum capacity it will
+// create a new resource. If the pool is at maximum capacity it will block until a resource is available. ctx can be
+// used to cancel the Acquire.
+//
+// If Acquire creates a new resource the resource constructor function will receive a context that delegates Value() to
+// ctx. Canceling ctx will cause Acquire to return immediately but it will not cancel the resource creation. This avoids
+// the problem of it being impossible to create resources when the time to create a resource is greater than any one
+// caller of Acquire is willing to wait.
+func (p *Pool[T]) Acquire(ctx context.Context) (_ *Resource[T], err error) {
+ select {
+ case <-ctx.Done():
+ p.canceledAcquireCount.Add(1)
+ return nil, ctx.Err()
+ default:
+ }
+
+ return p.acquire(ctx)
+}
+
+// acquire is a continuation of the Acquire function that doesn't check context
+// validity.
+//
+// This function exists solely for benchmarking purposes.
+func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
+ startNano := nanotime()
+
+ var waitedForLock bool
+ if !p.acquireSem.TryAcquire(1) {
+ waitedForLock = true
+ err := p.acquireSem.Acquire(ctx, 1)
+ if err != nil {
+ p.canceledAcquireCount.Add(1)
+ return nil, err
+ }
+ }
+
+ p.mux.Lock()
+ if p.closed {
+ p.acquireSem.Release(1)
+ p.mux.Unlock()
+ return nil, ErrClosedPool
+ }
+
+ // If a resource is available in the pool.
+ if res := p.tryAcquireIdleResource(); res != nil {
+ waitTime := time.Duration(nanotime() - startNano)
+ if waitedForLock {
+ p.emptyAcquireCount += 1
+ p.emptyAcquireWaitTime += waitTime
+ }
+ p.acquireCount += 1
+ p.acquireDuration += waitTime
+ p.mux.Unlock()
+ return res, nil
+ }
+
+ if len(p.allResources) >= int(p.maxSize) {
+ // Unreachable code.
+ panic("bug: semaphore allowed more acquires than pool allows")
+ }
+
+ // The resource is not idle, but there is enough space to create one.
+ res := p.createNewResource()
+ p.mux.Unlock()
+
+ res, err := p.initResourceValue(ctx, res)
+ if err != nil {
+ return nil, err
+ }
+
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ p.emptyAcquireCount += 1
+ p.acquireCount += 1
+ waitTime := time.Duration(nanotime() - startNano)
+ p.acquireDuration += waitTime
+ p.emptyAcquireWaitTime += waitTime
+
+ return res, nil
+}
+
+func (p *Pool[T]) initResourceValue(ctx context.Context, res *Resource[T]) (*Resource[T], error) {
+ // Create the resource in a goroutine to immediately return from Acquire
+ // if ctx is canceled without also canceling the constructor.
+ //
+ // See:
+ // - https://github.com/jackc/pgx/issues/1287
+ // - https://github.com/jackc/pgx/issues/1259
+ constructErrChan := make(chan error)
+ go func() {
+ constructorCtx := newValueCancelCtx(ctx, p.baseAcquireCtx)
+ value, err := p.constructor(constructorCtx)
+ if err != nil {
+ p.mux.Lock()
+ p.allResources.remove(res)
+ p.destructWG.Done()
+
+ // The resource won't be acquired because its
+ // construction failed. We have to allow someone else to
+ // take that resource.
+ p.acquireSem.Release(1)
+ p.mux.Unlock()
+
+ select {
+ case constructErrChan <- err:
+ case <-ctx.Done():
+ // The caller is canceled, so no one awaits the
+ // error. This branch avoids a goroutine leak.
+ }
+ return
+ }
+
+ // The resource is already in p.allResources where it might be read. So we need to acquire the lock to update its
+ // status.
+ p.mux.Lock()
+ res.value = value
+ res.status = resourceStatusAcquired
+ p.mux.Unlock()
+
+ // This select works because the channel is unbuffered.
+ select {
+ case constructErrChan <- nil:
+ case <-ctx.Done():
+ p.releaseAcquiredResource(res, res.lastUsedNano)
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ p.canceledAcquireCount.Add(1)
+ return nil, ctx.Err()
+ case err := <-constructErrChan:
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// TryAcquire gets a resource from the pool if one is immediately available. If not, it returns ErrNotAvailable. If no
+// resources are available but the pool has room to grow, a resource will be created in the background. ctx is only
+// used to cancel the background creation.
+func (p *Pool[T]) TryAcquire(ctx context.Context) (*Resource[T], error) {
+ if !p.acquireSem.TryAcquire(1) {
+ return nil, ErrNotAvailable
+ }
+
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ if p.closed {
+ p.acquireSem.Release(1)
+ return nil, ErrClosedPool
+ }
+
+ // If a resource is available now
+ if res := p.tryAcquireIdleResource(); res != nil {
+ p.acquireCount += 1
+ return res, nil
+ }
+
+ if len(p.allResources) >= int(p.maxSize) {
+ // Unreachable code.
+ panic("bug: semaphore allowed more acquires than pool allows")
+ }
+
+ res := p.createNewResource()
+ go func() {
+ value, err := p.constructor(ctx)
+
+ p.mux.Lock()
+ defer p.mux.Unlock()
+ // We have to create the resource and only then release the
+ // semaphore - For the time being there is no resource that
+ // someone could acquire.
+ defer p.acquireSem.Release(1)
+
+ if err != nil {
+ p.allResources.remove(res)
+ p.destructWG.Done()
+ return
+ }
+
+ res.value = value
+ res.status = resourceStatusIdle
+ p.idleResources.Push(res)
+ }()
+
+ return nil, ErrNotAvailable
+}
+
+// acquireSemAll tries to acquire num free tokens from sem. This function is
+// guaranteed to acquire at least the lowest number of tokens that has been
+// available in the semaphore during runtime of this function.
+//
+// For the time being, semaphore doesn't allow to acquire all tokens atomically
+// (see https://github.com/golang/sync/pull/19). We simulate this by trying all
+// powers of 2 that are less or equal to num.
+//
+// For example, let's imagine we have 19 free tokens in the semaphore which in
+// total has 24 tokens (i.e. the maxSize of the pool is 24 resources). Then if
+// num is 24, the log2Int(24) is 4 and we try to acquire 16, 8, 4, 2 and 1
+// tokens. Out of those, the acquire of 16, 2 and 1 tokens will succeed.
+//
+// Naturally, Acquires and Releases of the semaphore might take place
+// concurrently. For this reason, it's not guaranteed that absolutely all free
+// tokens in the semaphore will be acquired. But it's guaranteed that at least
+// the minimal number of tokens that has been present over the whole process
+// will be acquired. This is sufficient for the use-case we have in this
+// package.
+//
+// TODO: Replace this with acquireSem.TryAcquireAll() if it gets to
+// upstream. https://github.com/golang/sync/pull/19
+func acquireSemAll(sem *semaphore.Weighted, num int) int {
+ if sem.TryAcquire(int64(num)) {
+ return num
+ }
+
+ var acquired int
+ for i := int(log2Int(num)); i >= 0; i-- {
+ val := 1 << i
+ if sem.TryAcquire(int64(val)) {
+ acquired += val
+ }
+ }
+
+ return acquired
+}
+
+// AcquireAllIdle acquires all currently idle resources. Its intended use is for
+// health check and keep-alive functionality. It does not update pool
+// statistics.
+func (p *Pool[T]) AcquireAllIdle() []*Resource[T] {
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ if p.closed {
+ return nil
+ }
+
+ numIdle := p.idleResources.Len()
+ if numIdle == 0 {
+ return nil
+ }
+
+ // In acquireSemAll we use only TryAcquire and not Acquire. Because
+ // TryAcquire cannot block, holding the mutex while trying to acquire
+ // the semaphore cannot result in a deadlock.
+ //
+ // Because the mutex is locked, no parallel Release can run. This
+ // implies that the number of tokens can only decrease because some
+ // Acquire/TryAcquire call can consume the semaphore token. Consequently,
+ // acquired is always less than or equal to numIdle. Moreover, if acquired <
+ // numIdle, then there are some parallel Acquire/TryAcquire calls that
+ // will take the remaining idle connections.
+ acquired := acquireSemAll(p.acquireSem, numIdle)
+
+ idle := make([]*Resource[T], acquired)
+ for i := range idle {
+ res, _ := p.idleResources.Pop()
+ res.status = resourceStatusAcquired
+ idle[i] = res
+ }
+
+ // We have to bump the generation to ensure that Acquire/TryAcquire
+ // calls running in parallel (those which caused acquired < numIdle)
+ // will consume old connections and not freshly released connections
+ // instead.
+ p.idleResources.NextGen()
+
+ return idle
+}
+
+// CreateResource constructs a new resource without acquiring it. It goes straight in the IdlePool. If the pool is full
+// it returns an error. It can be useful to maintain warm resources under little load.
+func (p *Pool[T]) CreateResource(ctx context.Context) error {
+ if !p.acquireSem.TryAcquire(1) {
+ return ErrNotAvailable
+ }
+
+ p.mux.Lock()
+ if p.closed {
+ p.acquireSem.Release(1)
+ p.mux.Unlock()
+ return ErrClosedPool
+ }
+
+ if len(p.allResources) >= int(p.maxSize) {
+ p.acquireSem.Release(1)
+ p.mux.Unlock()
+ return ErrNotAvailable
+ }
+
+ res := p.createNewResource()
+ p.mux.Unlock()
+
+ value, err := p.constructor(ctx)
+ p.mux.Lock()
+ defer p.mux.Unlock()
+ defer p.acquireSem.Release(1)
+ if err != nil {
+ p.allResources.remove(res)
+ p.destructWG.Done()
+ return err
+ }
+
+ res.value = value
+ res.status = resourceStatusIdle
+
+ // If closed while constructing resource then destroy it and return an error
+ if p.closed {
+ go p.destructResourceValue(res.value)
+ return ErrClosedPool
+ }
+
+ p.idleResources.Push(res)
+
+ return nil
+}
+
+// Reset destroys all resources, but leaves the pool open. It is intended for use when an error is detected that would
+// disrupt all resources (such as a network interruption or a server state change).
+//
+// It is safe to reset a pool while resources are checked out. Those resources will be destroyed when they are returned
+// to the pool.
+func (p *Pool[T]) Reset() {
+ p.mux.Lock()
+ defer p.mux.Unlock()
+
+ p.resetCount++
+
+ for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
+ p.allResources.remove(res)
+ go p.destructResourceValue(res.value)
+ }
+}
+
+// releaseAcquiredResource returns res to the pool.
+func (p *Pool[T]) releaseAcquiredResource(res *Resource[T], lastUsedNano int64) {
+ p.mux.Lock()
+ defer p.mux.Unlock()
+ defer p.acquireSem.Release(1)
+
+ if p.closed || res.poolResetCount != p.resetCount {
+ p.allResources.remove(res)
+ go p.destructResourceValue(res.value)
+ } else {
+ res.lastUsedNano = lastUsedNano
+ res.status = resourceStatusIdle
+ p.idleResources.Push(res)
+ }
+}
+
+// destroyAcquiredResource destructs res and removes it from the pool. If res is
+// not part of the pool it will panic.
+func (p *Pool[T]) destroyAcquiredResource(res *Resource[T]) {
+ p.destructResourceValue(res.value)
+
+ p.mux.Lock()
+ defer p.mux.Unlock()
+ defer p.acquireSem.Release(1)
+
+ p.allResources.remove(res)
+}
+
+func (p *Pool[T]) hijackAcquiredResource(res *Resource[T]) {
+ p.mux.Lock()
+ defer p.mux.Unlock()
+ defer p.acquireSem.Release(1)
+
+ p.allResources.remove(res)
+ res.status = resourceStatusHijacked
+ p.destructWG.Done() // not responsible for destructing hijacked resources
+}
+
+func (p *Pool[T]) destructResourceValue(value T) {
+ p.destructor(value)
+ p.destructWG.Done()
+}
diff --git a/vendor/github.com/jackc/puddle/v2/resource_list.go b/vendor/github.com/jackc/puddle/v2/resource_list.go
new file mode 100644
index 0000000..b243095
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/resource_list.go
@@ -0,0 +1,28 @@
+package puddle
+
+type resList[T any] []*Resource[T]
+
+func (l *resList[T]) append(val *Resource[T]) { *l = append(*l, val) }
+
+func (l *resList[T]) popBack() *Resource[T] {
+ idx := len(*l) - 1
+ val := (*l)[idx]
+ (*l)[idx] = nil // Avoid memory leak
+ *l = (*l)[:idx]
+
+ return val
+}
+
+func (l *resList[T]) remove(val *Resource[T]) {
+ for i, elem := range *l {
+ if elem == val {
+ lastIdx := len(*l) - 1
+ (*l)[i] = (*l)[lastIdx]
+ (*l)[lastIdx] = nil // Avoid memory leak
+ (*l) = (*l)[:lastIdx]
+ return
+ }
+ }
+
+ panic("BUG: removeResource could not find res in slice")
+}
diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE
new file mode 100644
index 0000000..e7ddd51
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/LICENCE
@@ -0,0 +1,23 @@
+Copyright (c) 2013 John Barton
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md
new file mode 100644
index 0000000..bfbe66a
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/README.md
@@ -0,0 +1,202 @@
+# GoDotEnv  [](https://goreportcard.com/report/github.com/joho/godotenv)
+
+A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file).
+
+From the original Library:
+
+> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
+>
+> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv loads variables from a .env file into ENV when the environment is bootstrapped.
+
+It can be used as a library (for loading in env for your own daemons etc.) or as a bin command.
+
+There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows.
+
+## Installation
+
+As a library
+
+```shell
+go get github.com/joho/godotenv
+```
+
+or if you want to use it as a bin command
+
+go >= 1.17
+```shell
+go install github.com/joho/godotenv/cmd/godotenv@latest
+```
+
+go < 1.17
+```shell
+go get github.com/joho/godotenv/cmd/godotenv
+```
+
+## Usage
+
+Add your application configuration to your `.env` file in the root of your project:
+
+```shell
+S3_BUCKET=YOURS3BUCKET
+SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+Then in your Go app you can do something like
+
+```go
+package main
+
+import (
+ "log"
+ "os"
+
+ "github.com/joho/godotenv"
+)
+
+func main() {
+ err := godotenv.Load()
+ if err != nil {
+ log.Fatal("Error loading .env file")
+ }
+
+ s3Bucket := os.Getenv("S3_BUCKET")
+ secretKey := os.Getenv("SECRET_KEY")
+
+ // now do something with s3 or whatever
+}
+```
+
+If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
+
+```go
+import _ "github.com/joho/godotenv/autoload"
+```
+
+While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit
+
+```go
+godotenv.Load("somerandomfile")
+godotenv.Load("filenumberone.env", "filenumbertwo.env")
+```
+
+If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
+
+```shell
+# I am a comment and that is OK
+SOME_VAR=someval
+FOO=BAR # comments at line end are OK too
+export BAR=BAZ
+```
+
+Or finally you can do YAML(ish) style
+
+```yaml
+FOO: bar
+BAR: baz
+```
+
+as a final aside, if you don't want godotenv munging your env you can just get a map back instead
+
+```go
+var myEnv map[string]string
+myEnv, err := godotenv.Read()
+
+s3Bucket := myEnv["S3_BUCKET"]
+```
+
+... or from an `io.Reader` instead of a local file
+
+```go
+reader := getRemoteFile()
+myEnv, err := godotenv.Parse(reader)
+```
+
+... or from a `string` if you so desire
+
+```go
+content := getRemoteFileContent()
+myEnv, err := godotenv.Unmarshal(content)
+```
+
+### Precedence & Conventions
+
+Existing envs take precedence over envs that are loaded later.
+
+The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use)
+for managing multiple environments (i.e. development, test, production)
+is to create an env named `{YOURAPP}_ENV` and load envs in this order:
+
+```go
+env := os.Getenv("FOO_ENV")
+if "" == env {
+ env = "development"
+}
+
+godotenv.Load(".env." + env + ".local")
+if "test" != env {
+ godotenv.Load(".env.local")
+}
+godotenv.Load(".env." + env)
+godotenv.Load() // The Original .env
+```
+
+If you need to, you can also use `godotenv.Overload()` to defy this convention
+and overwrite existing envs instead of only supplanting them. Use with caution.
+
+### Command Mode
+
+Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
+
+```
+godotenv -f /some/path/to/.env some_command with some args
+```
+
+If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
+
+By default, it won't override existing environment variables; you can do that with the `-o` flag.
+
+### Writing Env Files
+
+Godotenv can also write a map representing the environment to a correctly-formatted and escaped file
+
+```go
+env, err := godotenv.Unmarshal("KEY=value")
+err := godotenv.Write(env, "./.env")
+```
+
+... or to a string
+
+```go
+env, err := godotenv.Unmarshal("KEY=value")
+content, err := godotenv.Marshal(env)
+```
+
+## Contributing
+
+Contributions are welcome, but with some caveats.
+
+This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API.
+
+Contributions would be gladly accepted that:
+
+* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv)
+* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries)
+* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments
+
+*code changes without tests and references to peer dotenv implementations will not be accepted*
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Added some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Releases
+
+Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`.
+
+Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1`
+
+## Who?
+
+The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library.
diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go
new file mode 100644
index 0000000..61b0ebb
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/godotenv.go
@@ -0,0 +1,228 @@
+// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+//
+// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv
+//
+// The TL;DR is that you make a .env file that looks something like
+//
+// SOME_ENV_VAR=somevalue
+//
+// and then in your go code you can call
+//
+// godotenv.Load()
+//
+// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
+package godotenv
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+const doubleQuoteSpecialChars = "\\\n\r\"!$`"
+
+// Parse reads an env file from io.Reader, returning a map of keys and values.
+func Parse(r io.Reader) (map[string]string, error) {
+ var buf bytes.Buffer
+ _, err := io.Copy(&buf, r)
+ if err != nil {
+ return nil, err
+ }
+
+ return UnmarshalBytes(buf.Bytes())
+}
+
+// Load will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main).
+//
+// If you call Load without any args it will default to loading .env in the current path.
+//
+// You can otherwise tell it which files to load (there can be more than one) like:
+//
+// godotenv.Load("fileone", "filetwo")
+//
+// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults.
+func Load(filenames ...string) (err error) {
+ filenames = filenamesOrDefault(filenames)
+
+ for _, filename := range filenames {
+ err = loadFile(filename, false)
+ if err != nil {
+ return // return early on a spazout
+ }
+ }
+ return
+}
+
+// Overload will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main).
+//
+// If you call Overload without any args it will default to loading .env in the current path.
+//
+// You can otherwise tell it which files to load (there can be more than one) like:
+//
+// godotenv.Overload("fileone", "filetwo")
+//
+// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
+func Overload(filenames ...string) (err error) {
+ filenames = filenamesOrDefault(filenames)
+
+ for _, filename := range filenames {
+ err = loadFile(filename, true)
+ if err != nil {
+			return // return early on error
+ }
+ }
+ return
+}
+
+// Read reads all env files (with the same file loading semantics as Load) but returns
+// the values as a map rather than automatically writing them into the environment.
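+//
+// For example (an illustrative call, not part of the package):
+//
+//	envMap, err := godotenv.Read(".env")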
+func Read(filenames ...string) (envMap map[string]string, err error) {
+ filenames = filenamesOrDefault(filenames)
+ envMap = make(map[string]string)
+
+ for _, filename := range filenames {
+ individualEnvMap, individualErr := readFile(filename)
+
+ if individualErr != nil {
+ err = individualErr
+			return // return early on error
+ }
+
+ for key, value := range individualEnvMap {
+ envMap[key] = value
+ }
+ }
+
+ return
+}
+
+// Unmarshal reads an env file from a string, returning a map of keys and values.
+func Unmarshal(str string) (envMap map[string]string, err error) {
+ return UnmarshalBytes([]byte(str))
+}
+
+// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values.
+func UnmarshalBytes(src []byte) (map[string]string, error) {
+ out := make(map[string]string)
+ err := parseBytes(src, out)
+
+ return out, err
+}
+
+// Exec loads env vars from the specified filenames (an empty slice falls back to the default .env)
+// then executes the cmd specified.
+//
+// Simply hooks up os.Stdin/err/out to the command and calls Run().
+//
+// If you want more fine grained control over your command it's recommended
+// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself.
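+//
+// A minimal illustrative call (the command name and arguments here are placeholders):
+//
+//	err := godotenv.Exec([]string{".env"}, "printenv", []string{"SOME_ENV_VAR"}, false)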
+func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error {
+ op := Load
+ if overload {
+ op = Overload
+ }
+ if err := op(filenames...); err != nil {
+ return err
+ }
+
+ command := exec.Command(cmd, cmdArgs...)
+ command.Stdin = os.Stdin
+ command.Stdout = os.Stdout
+ command.Stderr = os.Stderr
+ return command.Run()
+}
+
+// Write serializes the given environment and writes it to a file.
+func Write(envMap map[string]string, filename string) error {
+ content, err := Marshal(envMap)
+ if err != nil {
+ return err
+ }
+ file, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ _, err = file.WriteString(content + "\n")
+ if err != nil {
+ return err
+ }
+ return file.Sync()
+}
+
+// Marshal outputs the given environment as a dotenv-formatted environment file.
+// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped.
+func Marshal(envMap map[string]string) (string, error) {
+ lines := make([]string, 0, len(envMap))
+ for k, v := range envMap {
+ if d, err := strconv.Atoi(v); err == nil {
+ lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
+ } else {
+ lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
+ }
+ }
+ sort.Strings(lines)
+ return strings.Join(lines, "\n"), nil
+}
+
+func filenamesOrDefault(filenames []string) []string {
+ if len(filenames) == 0 {
+ return []string{".env"}
+ }
+ return filenames
+}
+
+func loadFile(filename string, overload bool) error {
+ envMap, err := readFile(filename)
+ if err != nil {
+ return err
+ }
+
+ currentEnv := map[string]bool{}
+ rawEnv := os.Environ()
+ for _, rawEnvLine := range rawEnv {
+ key := strings.Split(rawEnvLine, "=")[0]
+ currentEnv[key] = true
+ }
+
+ for key, value := range envMap {
+ if !currentEnv[key] || overload {
+ _ = os.Setenv(key, value)
+ }
+ }
+
+ return nil
+}
+
+func readFile(filename string) (envMap map[string]string, err error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ return Parse(file)
+}
+
+func doubleQuoteEscape(line string) string {
+ for _, c := range doubleQuoteSpecialChars {
+ toReplace := "\\" + string(c)
+ if c == '\n' {
+ toReplace = `\n`
+ }
+ if c == '\r' {
+ toReplace = `\r`
+ }
+ line = strings.Replace(line, string(c), toReplace, -1)
+ }
+ return line
+}
diff --git a/vendor/github.com/joho/godotenv/parser.go b/vendor/github.com/joho/godotenv/parser.go
new file mode 100644
index 0000000..cc709af
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/parser.go
@@ -0,0 +1,271 @@
+package godotenv
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+const (
+ charComment = '#'
+ prefixSingleQuote = '\''
+ prefixDoubleQuote = '"'
+
+ exportPrefix = "export"
+)
+
+func parseBytes(src []byte, out map[string]string) error {
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+ cutset := src
+ for {
+ cutset = getStatementStart(cutset)
+ if cutset == nil {
+ // reached end of file
+ break
+ }
+
+ key, left, err := locateKeyName(cutset)
+ if err != nil {
+ return err
+ }
+
+ value, left, err := extractVarValue(left, out)
+ if err != nil {
+ return err
+ }
+
+ out[key] = value
+ cutset = left
+ }
+
+ return nil
+}
+
+// getStatementStart returns the src slice beginning at the next statement.
+//
+// It skips leading whitespace and comment lines.
+func getStatementStart(src []byte) []byte {
+ pos := indexOfNonSpaceChar(src)
+ if pos == -1 {
+ return nil
+ }
+
+ src = src[pos:]
+ if src[0] != charComment {
+ return src
+ }
+
+ // skip comment section
+ pos = bytes.IndexFunc(src, isCharFunc('\n'))
+ if pos == -1 {
+ return nil
+ }
+
+ return getStatementStart(src[pos:])
+}
+
+// locateKeyName locates and parses key name and returns rest of slice
+func locateKeyName(src []byte) (key string, cutset []byte, err error) {
+ // trim "export" and space at beginning
+ src = bytes.TrimLeftFunc(src, isSpace)
+ if bytes.HasPrefix(src, []byte(exportPrefix)) {
+ trimmed := bytes.TrimPrefix(src, []byte(exportPrefix))
+ if bytes.IndexFunc(trimmed, isSpace) == 0 {
+ src = bytes.TrimLeftFunc(trimmed, isSpace)
+ }
+ }
+
+ // locate key name end and validate it in single loop
+ offset := 0
+loop:
+ for i, char := range src {
+ rchar := rune(char)
+ if isSpace(rchar) {
+ continue
+ }
+
+ switch char {
+ case '=', ':':
+ // library also supports yaml-style value declaration
+ key = string(src[0:i])
+ offset = i + 1
+ break loop
+ case '_':
+ default:
+ // variable name should match [A-Za-z0-9_.]
+ if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' {
+ continue
+ }
+
+ return "", nil, fmt.Errorf(
+ `unexpected character %q in variable name near %q`,
+ string(char), string(src))
+ }
+ }
+
+ if len(src) == 0 {
+ return "", nil, errors.New("zero length string")
+ }
+
+ // trim whitespace
+ key = strings.TrimRightFunc(key, unicode.IsSpace)
+ cutset = bytes.TrimLeftFunc(src[offset:], isSpace)
+ return key, cutset, nil
+}
+
+// extractVarValue extracts variable value and returns rest of slice
+func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) {
+ quote, hasPrefix := hasQuotePrefix(src)
+ if !hasPrefix {
+ // unquoted value - read until end of line
+ endOfLine := bytes.IndexFunc(src, isLineEnd)
+
+ // Hit EOF without a trailing newline
+ if endOfLine == -1 {
+ endOfLine = len(src)
+
+ if endOfLine == 0 {
+ return "", nil, nil
+ }
+ }
+
+		// Convert line to a rune slice to do an accurate countback of runes
+ line := []rune(string(src[0:endOfLine]))
+
+ // Assume end of line is end of var
+ endOfVar := len(line)
+ if endOfVar == 0 {
+ return "", src[endOfLine:], nil
+ }
+
+ // Work backwards to check if the line ends in whitespace then
+ // a comment (ie asdasd # some comment)
+ for i := endOfVar - 1; i >= 0; i-- {
+ if line[i] == charComment && i > 0 {
+ if isSpace(line[i-1]) {
+ endOfVar = i
+ break
+ }
+ }
+ }
+
+ trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace)
+
+ return expandVariables(trimmed, vars), src[endOfLine:], nil
+ }
+
+ // lookup quoted string terminator
+ for i := 1; i < len(src); i++ {
+ if char := src[i]; char != quote {
+ continue
+ }
+
+ // skip escaped quote symbol (\" or \', depends on quote)
+ if prevChar := src[i-1]; prevChar == '\\' {
+ continue
+ }
+
+ // trim quotes
+ trimFunc := isCharFunc(rune(quote))
+ value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc))
+ if quote == prefixDoubleQuote {
+ // unescape newlines for double quote (this is compat feature)
+ // and expand environment variables
+ value = expandVariables(expandEscapes(value), vars)
+ }
+
+ return value, src[i+1:], nil
+ }
+
+ // return formatted error if quoted string is not terminated
+ valEndIndex := bytes.IndexFunc(src, isCharFunc('\n'))
+ if valEndIndex == -1 {
+ valEndIndex = len(src)
+ }
+
+ return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex])
+}
+
+func expandEscapes(str string) string {
+ out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string {
+ c := strings.TrimPrefix(match, `\`)
+ switch c {
+ case "n":
+ return "\n"
+ case "r":
+ return "\r"
+ default:
+ return match
+ }
+ })
+ return unescapeCharsRegex.ReplaceAllString(out, "$1")
+}
+
+func indexOfNonSpaceChar(src []byte) int {
+ return bytes.IndexFunc(src, func(r rune) bool {
+ return !unicode.IsSpace(r)
+ })
+}
+
+// hasQuotePrefix reports whether src starts with a single or double quote and returns the quote character
+func hasQuotePrefix(src []byte) (prefix byte, isQuoted bool) {
+ if len(src) == 0 {
+ return 0, false
+ }
+
+ switch prefix := src[0]; prefix {
+ case prefixDoubleQuote, prefixSingleQuote:
+ return prefix, true
+ default:
+ return 0, false
+ }
+}
+
+func isCharFunc(char rune) func(rune) bool {
+ return func(v rune) bool {
+ return v == char
+ }
+}
+
+// isSpace reports whether the rune is a space character but not line break character
+//
+// this differs from unicode.IsSpace, which also applies line break as space
+func isSpace(r rune) bool {
+ switch r {
+ case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
+ return true
+ }
+ return false
+}
+
+func isLineEnd(r rune) bool {
+ if r == '\n' || r == '\r' {
+ return true
+ }
+ return false
+}
+
+var (
+ escapeRegex = regexp.MustCompile(`\\.`)
+ expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
+ unescapeCharsRegex = regexp.MustCompile(`\\([^$])`)
+)
+
+func expandVariables(v string, m map[string]string) string {
+ return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
+ submatch := expandVarRegex.FindStringSubmatch(s)
+
+ if submatch == nil {
+ return s
+ }
+ if submatch[1] == "\\" || submatch[2] == "(" {
+ return submatch[0][1:]
+ } else if submatch[4] != "" {
+ return m[submatch[4]]
+ }
+ return s
+ })
+}
diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore
new file mode 100644
index 0000000..0d99709
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/.gitignore
@@ -0,0 +1,11 @@
+*.rdb
+testdata/*
+.idea/
+.DS_Store
+*.tar.gz
+*.dic
+redis8tests.sh
+coverage.txt
+**/coverage.txt
+.vscode
+tmp/*
diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml
new file mode 100644
index 0000000..872454f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml
@@ -0,0 +1,34 @@
+version: "2"
+run:
+ timeout: 5m
+ tests: false
+linters:
+ settings:
+ staticcheck:
+ checks:
+ - all
+ # Incorrect or missing package comment.
+ # https://staticcheck.dev/docs/checks/#ST1000
+ - -ST1000
+ # Omit embedded fields from selector expression.
+ # https://staticcheck.dev/docs/checks/#QF1008
+ - -QF1008
+ - -ST1003
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/redis/go-redis/v9/.prettierrc.yml b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
new file mode 100644
index 0000000..e1652b1
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
@@ -0,0 +1,133 @@
+## Unreleased
+
+### Changed
+
+* `go-redis` won't skip span creation if the parent span is not recording. ([#2980](https://github.com/redis/go-redis/issues/2980))
+ Users can use the OpenTelemetry sampler to control the sampling behavior.
+ For instance, you can use the `ParentBased(NeverSample())` sampler from `go.opentelemetry.io/otel/sdk/trace` to keep
+ a similar behavior (drop orphan spans) of `go-redis` as before.
+
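+  A minimal sketch of that setup (the `redisotel.WithTracerProvider` option and the OpenTelemetry SDK names below are assumed from their respective packages, not from this changelog entry):
+
+  ```go
+  import (
+  	"github.com/redis/go-redis/extra/redisotel/v9"
+  	"github.com/redis/go-redis/v9"
+  	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  func instrument(rdb *redis.Client) error {
+  	// Drop spans whose parent is not recording, mirroring the previous behavior.
+  	tp := sdktrace.NewTracerProvider(
+  		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.NeverSample())),
+  	)
+  	return redisotel.InstrumentTracing(rdb, redisotel.WithTracerProvider(tp))
+  }
+  ```
+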
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+  `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791` (see the sketch after this list).
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+- Added `redis.HasErrorPrefix` to help working with errors.
+
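+A minimal sketch of the `ParseClusterURL` flow mentioned above (variable names and error handling are illustrative):
+
+```go
+opts, err := redis.ParseClusterURL("redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791")
+if err != nil {
+	panic(err)
+}
+rdb := redis.NewClusterClient(opts)
+```
+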
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usual, added support for more commands and fixed some bugs.
diff --git a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
new file mode 100644
index 0000000..7228a4a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
@@ -0,0 +1,118 @@
+# Contributing
+
+## Introduction
+
+We appreciate your interest in considering contributing to go-redis.
+Community contributions mean a lot to us.
+
+## Contributions we need
+
+You may already know how you'd like to contribute, whether it's a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know where to start: improving documentation, triaging bugs,
+and writing tutorials are all examples of helpful contributions that mean
+less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/redis/go-redis/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Here are a couple of friendly
+tutorials:
+
+-
+-
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of go-redis
+2. Do the changes in your fork
+3. If you need a development environment, run `make docker.start`.
+
+> Note: this clones and builds the docker containers specified in `docker-compose.yml`; to understand more about
+> the infrastructure that will be started, you can check `docker-compose.yml`. You also have the possibility
+> to specify the redis image that will be pulled with the env variable `CLIENT_LIBS_TEST_IMAGE`.
+> By default the docker image that will be pulled and started is `redislabs/client-libs-test:rs-7.4.0-v2`.
+> If you want to test with newer Redis version, using a newer version of `redislabs/client-libs-test` should work out of the box.
+
+4. While developing, make sure the tests pass by running `make test` (if you have the docker containers running, `make test.ci` may be sufficient).
+> Note: `make test` will try to start all containers, run the tests with `make test.ci` and then stop all containers.
+5. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+
+## Testing
+
+### Setting up Docker
+To run the tests, you need to have Docker installed and running. If you are using a host OS that does not support
+docker host networks out of the box (e.g. Windows, macOS), you need to set up Docker Desktop and enable docker host networks.
+
+### Running tests
+Call `make test` to run all tests.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of redis. Feel free to test your
+changes against all the go versions supported, as declared by the
+[build.yml](./.github/workflows/build.yml) file.
+
+### Troubleshooting
+
+If you get any errors when running `make test`, make sure
+that you are using supported versions of Docker and go.
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions is *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of go-redis are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Suggest a feature or enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be underway on the feature you want or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code review process
+
+The core team regularly looks at pull requests. We will provide
+feedback as soon as possible. After receiving our feedback, please respond
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
+
+## Support
+
+Maintainers can provide limited support to contributors on discord: https://discord.gg/W4txy5AeKM
diff --git a/vendor/github.com/redis/go-redis/v9/LICENSE b/vendor/github.com/redis/go-redis/v9/LICENSE
new file mode 100644
index 0000000..f4967db
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile
new file mode 100644
index 0000000..fc175f5
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/Makefile
@@ -0,0 +1,43 @@
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+docker.start:
+ docker compose --profile all up -d --quiet-pull
+
+docker.stop:
+ docker compose --profile all down
+
+test:
+ $(MAKE) docker.start
+ $(MAKE) test.ci
+ $(MAKE) docker.stop
+
+test.ci:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy -compat=1.18 && \
+ go vet && \
+ go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
+
+bench:
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test bench fmt
+
+build:
+ go build .
+
+fmt:
+ gofumpt -w ./
+ goimports -w -local github.com/redis/go-redis ./
+
+go_mod_tidy:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u ./... && \
+ go mod tidy -compat=1.18); \
+ done
diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md
new file mode 100644
index 0000000..c37a52e
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/README.md
@@ -0,0 +1,458 @@
+# Redis client for Go
+
+[](https://github.com/redis/go-redis/actions)
+[](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
+[](https://redis.uptrace.dev/)
+[](https://goreportcard.com/report/github.com/redis/go-redis/v9)
+[](https://codecov.io/github/redis/go-redis)
+
+[](https://discord.gg/W4txy5AeKM)
+[](https://www.twitch.tv/redisinc)
+[](https://www.youtube.com/redisinc)
+[](https://twitter.com/redisinc)
+[](https://stackoverflow.com/questions/tagged/go-redis)
+
+> go-redis is the official Redis client library for the Go programming language. It offers a straightforward interface for interacting with Redis servers.
+
+## Supported versions
+
+In `go-redis` we are aiming to support the last three releases of Redis. Currently, this means we do support:
+- [Redis 7.2](https://raw.githubusercontent.com/redis/redis/7.2/00-RELEASENOTES) - using Redis Stack 7.2 for modules support
+- [Redis 7.4](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES) - using Redis Stack 7.4 for modules support
+- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0 where modules are included
+
+Although the `go.mod` states it requires at minimum `go 1.18`, our CI is configured to run the tests against all three
+versions of Redis and the latest two versions of Go ([1.23](https://go.dev/doc/devel/release#go1.23.0),
+[1.24](https://go.dev/doc/devel/release#go1.24.0)). We observe that some module-related tests may not pass with
+Redis Stack 7.2, and some commands have changed with Redis CE 8.0.
+Please do refer to the documentation and the tests if you experience any issues. We do plan to update the go version
+in the `go.mod` to `go 1.24` in one of the next releases.
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://university.redis.com/)
+
+[Build faster with the Redis Launchpad](https://launchpad.redis.com/)
+
+[Try the Redis Cloud](https://redis.com/try-free/)
+
+[Dive in developer tutorials](https://developer.redis.com/)
+
+[Join the Redis community](https://redis.com/community/)
+
+[Work at Redis](https://redis.com/company/careers/jobs/)
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/W4txy5AeKM)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key-value NoSQL database that uses RocksDB as its storage engine and is compatible with the Redis protocol.
+
+## Features
+
+- Redis commands except QUIT and SYNC.
+- Automatic connection pooling.
+- [StreamingCredentialsProvider (e.g. entra id, oauth)](#1-streaming-credentials-provider-highest-priority) (experimental)
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+
+## Installation
+
+go-redis supports the last 2 Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/**v9**:
+
+```shell
+go get github.com/redis/go-redis/v9
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+### Authentication
+
+The Redis client supports multiple ways to provide authentication credentials, with a clear priority order. Here are the available options:
+
+#### 1. Streaming Credentials Provider (Highest Priority) - Experimental feature
+
+The streaming credentials provider allows for dynamic credential updates during the connection lifetime. This is particularly useful for managed identity services and token-based authentication.
+
+```go
+type StreamingCredentialsProvider interface {
+ Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error)
+}
+
+type CredentialsListener interface {
+ OnNext(credentials Credentials) // Called when credentials are updated
+ OnError(err error) // Called when an error occurs
+}
+
+type Credentials interface {
+ BasicAuth() (username string, password string)
+ RawCredentials() string
+}
+```
+
+Example usage:
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ StreamingCredentialsProvider: &MyCredentialsProvider{},
+})
+```
+
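+`MyCredentialsProvider` above is not part of the library. A minimal static implementation of the interface might look like the sketch below, assuming the types shown here live in the `github.com/redis/go-redis/v9/auth` package:
+
+```go
+import "github.com/redis/go-redis/v9/auth"
+
+// MyCredentialsProvider supplies a fixed set of credentials and never updates them.
+type MyCredentialsProvider struct {
+	username, password string
+}
+
+func (p *MyCredentialsProvider) Subscribe(listener auth.CredentialsListener) (auth.Credentials, auth.UnsubscribeFunc, error) {
+	// Static credentials never change, so the listener is not used here; a real
+	// provider would call listener.OnNext whenever a new token is issued and
+	// listener.OnError if refreshing the credentials fails.
+	creds := auth.NewBasicCredentials(p.username, p.password)
+	return creds, func() error { return nil }, nil
+}
+```
+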
+**Note:** The streaming credentials provider can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) to enable Entra ID (formerly Azure AD) authentication. This allows for seamless integration with Azure's managed identity services and token-based authentication.
+
+Example with Entra ID:
+```go
+import (
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis-entraid"
+)
+
+// Create an Entra ID credentials provider
+provider := entraid.NewDefaultAzureIdentityProvider()
+
+// Configure Redis client with Entra ID authentication
+rdb := redis.NewClient(&redis.Options{
+ Addr: "your-redis-server.redis.cache.windows.net:6380",
+ StreamingCredentialsProvider: provider,
+ TLSConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+})
+```
+
+#### 2. Context-based Credentials Provider
+
+The context-based provider allows credentials to be determined at the time of each operation, using the context.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ CredentialsProviderContext: func(ctx context.Context) (string, string, error) {
+ // Return username, password, and any error
+ return "user", "pass", nil
+ },
+})
+```
+
+#### 3. Regular Credentials Provider
+
+A simple function-based provider that returns static credentials.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ CredentialsProvider: func() (string, string) {
+ // Return username and password
+ return "user", "pass"
+ },
+})
+```
+
+#### 4. Username/Password Fields (Lowest Priority)
+
+The most basic way to provide credentials is through the `Username` and `Password` fields in the options.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Username: "user",
+ Password: "pass",
+})
+```
+
+#### Priority Order
+
+The client will use credentials in the following priority order:
+1. Streaming Credentials Provider (if set)
+2. Context-based Credentials Provider (if set)
+3. Regular Credentials Provider (if set)
+4. Username/Password fields (if set)
+
+If none of these are set, the client will attempt to connect without authentication.
+
+### Protocol Version
+
+The client supports both RESP2 and RESP3 protocols. You can specify the protocol version in the options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+})
+```
+
+### Connecting via a redis url
+
+go-redis also supports connecting via the
+[redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt).
+The example below demonstrates how the connection can easily be configured using a string, adhering
+to this specification.
+
+```go
+import (
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient() *redis.Client {
+ url := "redis://user:password@localhost:6379/0?protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+
+ return redis.NewClient(opts)
+}
+
+```
+
+### Instrument with OpenTelemetry
+
+```go
+import (
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/extra/redisotel/v9"
+ "errors"
+)
+
+func main() {
+ ...
+ rdb := redis.NewClient(&redis.Options{...})
+
+ if err := errors.Join(redisotel.InstrumentTracing(rdb), redisotel.InstrumentMetrics(rdb)); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+
+### Advanced Configuration
+
+go-redis supports extending the client identification phase to allow projects to send their own custom client identification.
+
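+For example, one way to customize what is reported is the `IdentitySuffix` client option; the snippet below is a sketch and assumes that option is available in your go-redis version:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+	Addr:           "localhost:6379",
+	IdentitySuffix: "my-service", // appended to the client identification sent on connect
+})
+```
+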
+#### Default Client Identification
+
+By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it fails silently if the Redis server does not support this feature.
+
+#### Disabling Identity Verification
+
+When connection identity verification is not required or needs to be explicitly disabled, a `DisableIdentity` configuration option exists.
+Initially there was a typo and the option was named `DisableIndentity` instead of `DisableIdentity`. The misspelled option is marked as deprecated and will be removed in V10 of this library.
+Although both options work at the moment, the correct option is `DisableIdentity`, so please use it to avoid any issues.
+
+To disable verification, set the `DisableIdentity` option to `true` in the Redis client options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ DisableIdentity: true, // Disable set-info on connect
+})
+```
+
+#### Unstable RESP3 Structures for RediSearch Commands
+When integrating Redis with application functionalities using RESP3, it's important to note that some response structures aren't final yet. This is especially true for more complex structures like search and query results. We recommend using RESP2 when using the search and query capabilities, but we plan to stabilize the RESP3-based APIs in the coming versions. You can find more guidance in the upcoming release notes.
+
+To enable unstable RESP3, set the option in your client configuration:
+
+```go
+redis.NewClient(&redis.Options{
+ UnstableResp3: true,
+ })
+```
+**Note:** When UnstableResp3 mode is enabled, it's necessary to use RawResult() and RawVal() to retrieve the raw data.
+Since the raw response is the only option for unstable search commands, Val() and Result() calls won't have any effect on them:
+
+```go
+res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawResult()
+val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal()
+```
+
+#### Redis-Search Default Dialect
+
+In the Redis-Search module, **the default dialect is 2**. If needed, you can explicitly specify a different dialect using the appropriate configuration in your queries.
+
+**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by passing the desired dialect in the arguments of the command you want to execute.
+For example:
+```
+ res2, err := rdb.FTSearchWithArgs(ctx,
+ "idx:bicycle",
+ "@pickup_zone:[CONTAINS $bike]",
+ &redis.FTSearchOptions{
+ Params: map[string]interface{}{
+ "bike": "POINT(-0.1278 51.5074)",
+ },
+ DialectVersion: 3,
+ },
+ ).Result()
+```
+You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
+
+## Contributing
+We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
+We appreciate your help in making go-redis better for everyone.
+If you are interested in contributing to the go-redis library, please check out our [contributing guidelines](CONTRIBUTING.md) for more information on how to get started.
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+	Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of redis-server bin file and redis config file are defined in `main_test.go`:
+
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+Another option is to run your specific tests against an already running redis. The example below tests
+against a redis running on port 9999:
+
+```shell
+REDIS_PORT=9999 go test
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+> The go-redis project was originally initiated by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
+> demonstrates how you can use Uptrace to monitor go-redis.
+
+Thanks to all the people who already contributed!
+
+
+
+
diff --git a/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md
new file mode 100644
index 0000000..fc9ed2a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md
@@ -0,0 +1,137 @@
+# Release Notes
+
+# 9.9.0 (2024-03-21)
+
+## 🚀 Highlights
+- **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental)
+ - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication
+- **Connection Statistics**: Added connection waiting statistics for better monitoring
+- **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration
+- **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management
+
+## ✨ New Features
+- Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320))
+ - Supports dynamic credential updates
+ - Includes connection close hooks
+ - Note: Currently marked as experimental
+- Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362))
+- Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804))
+- Added new utility functions:
+ - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371))
+ - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377))
+- Added Ring client shard access methods:
+ - `GetShardClients()` to retrieve all active shard clients
+ - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388))
+
+## 🐛 Bug Fixes
+- Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370))
+- Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369))
+- Fixed pool acquisition timeout issues ([#3381](https://github.com/redis/go-redis/pull/3381))
+- Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376))
+
+## 📚 Documentation
+- Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369))
+- Added package-level comments for new features
+
+## ⚡ Performance and Reliability
+- Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383))
+- Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387))
+- Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380))
+
+## 🔧 Dependencies and Infrastructure
+- Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372))
+- Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389))
+- Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384))
+
+## 🧪 Testing
+- Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381))
+- Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377))
+
+## 👥 Contributors
+
+We would like to thank all the contributors who made this release possible:
+
+[@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), [@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), [@DengY11](https://github.com/DengY11)
+
+## 📝 Changelog
+
+For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0).
+
+# 9.8.0 (2025-04-30)
+
+## 🚀 Highlights
+- **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration
+- **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command
+- **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search`
+
+## ✨ New Features
+- Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305))
+- Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843))
+- Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182))
+- Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273))
+- Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794))
+- Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255))
+- Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294))
+- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213))
+- Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281))
+- Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338))
+- Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342))
+- Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363))
+
+## 🐛 Bug Fixes
+- Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360))
+- Fixed panic caused when argument is `nil` ([#3353](https://github.com/redis/go-redis/pull/3353))
+- Improved error handling when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349))
+- Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298))
+- Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190))
+- Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164))
+- Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290))
+- Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168))
+- Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338))
+- Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344))
+- Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327))
+- Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329))
+
+## 📚 Documentation
+- Added hash search examples ([#3357](https://github.com/redis/go-redis/pull/3357))
+- Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351))
+- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345))
+- Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234))
+- Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242))
+- Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331))
+- Updated `HExpire` command documentation ([#3355](https://github.com/redis/go-redis/pull/3355))
+- Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316))
+- Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55))
+
+## ⚡ Performance and Reliability
+- Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089))
+- Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334))
+
+## 🔧 Dependencies and Infrastructure
+- Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361))
+- Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274))
+- Updated various dependencies:
+ - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354))
+ - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336))
+ - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308))
+- Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354))
+
+## ⚠️ Breaking Changes
+- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213))
+- Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321))
+- Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323))
+
+## 🔒 Security
+- Fixed network error handling on SETINFO (CVE-2025-29923) ([#3295](https://github.com/redis/go-redis/pull/3295))
+
+## 🧪 Testing
+- Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337))
+- Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299))
+- Added test codes for search_commands.go ([#3285](https://github.com/redis/go-redis/pull/3285))
+- Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292))
+
+## 👥 Contributors
+
+We would like to thank all the contributors who made this release possible:
+
+[@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), [@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), [@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi)
diff --git a/vendor/github.com/redis/go-redis/v9/RELEASING.md b/vendor/github.com/redis/go-redis/v9/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/vendor/github.com/redis/go-redis/v9/acl_commands.go b/vendor/github.com/redis/go-redis/v9/acl_commands.go
new file mode 100644
index 0000000..9cb800b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/acl_commands.go
@@ -0,0 +1,89 @@
+package redis
+
+import "context"
+
+type ACLCmdable interface {
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+
+ ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd
+ ACLDelUser(ctx context.Context, username string) *IntCmd
+ ACLList(ctx context.Context) *StringSliceCmd
+
+ ACLCat(ctx context.Context) *StringSliceCmd
+ ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd
+}
+
+type ACLCatArgs struct {
+ Category string
+}
+
+func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
+ args := make([]interface{}, 0, 3+len(command))
+ args = append(args, "acl", "dryrun", username)
+ args = append(args, command...)
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd {
+ args := make([]interface{}, 0, 3)
+ args = append(args, "acl", "log")
+ if count > 0 {
+ args = append(args, count)
+ }
+ cmd := NewACLLogCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "acl", "log", "reset")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd {
+ cmd := NewIntCmd(ctx, "acl", "deluser", username)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd {
+ args := make([]interface{}, 3+len(rules))
+ args[0] = "acl"
+ args[1] = "setuser"
+ args[2] = username
+ for i, rule := range rules {
+ args[i+3] = rule
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "acl", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "acl", "cat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd {
+ // if there is a category passed, build new cmd, if there isn't - use the ACLCat method
+ if options != nil && options.Category != "" {
+ cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category)
+ _ = c(ctx, cmd)
+ return cmd
+ }
+
+ return c.ACLCat(ctx)
+}
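
The ACL methods above are thin wrappers that build the `acl ...` argument slice and hand it to the underlying `cmdable` function. A minimal usage sketch, assuming a local Redis at `localhost:6379`; the user name, rules, and category are illustrative placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Create a restricted user, then list all configured users.
	if err := rdb.ACLSetUser(ctx, "reporting", "on", ">secret", "~report:*", "+get").Err(); err != nil {
		panic(err)
	}
	users, err := rdb.ACLList(ctx).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(users)

	// ACLCatArgs falls back to plain ACL CAT when no category is given.
	cats, _ := rdb.ACLCatArgs(ctx, &redis.ACLCatArgs{Category: "read"}).Result()
	fmt.Println(cats)
}
```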
diff --git a/vendor/github.com/redis/go-redis/v9/auth/auth.go b/vendor/github.com/redis/go-redis/v9/auth/auth.go
new file mode 100644
index 0000000..1f5c802
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/auth/auth.go
@@ -0,0 +1,61 @@
+// Package auth provides authentication-related interfaces and types.
+// It also includes a basic implementation of credentials using username and password.
+package auth
+
+// StreamingCredentialsProvider is an interface that defines the methods for a streaming credentials provider.
+// It is used to provide credentials for authentication.
+// The CredentialsListener is used to receive updates when the credentials change.
+type StreamingCredentialsProvider interface {
+ // Subscribe subscribes to the credentials provider for updates.
+ // It returns the current credentials, a cancel function to unsubscribe from the provider,
+ // and an error if any.
+ // TODO(ndyakov): Should we add context to the Subscribe method?
+ Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error)
+}
+
+// UnsubscribeFunc is a function that is used to cancel the subscription to the credentials provider.
+// It is used to unsubscribe from the provider when the credentials are no longer needed.
+type UnsubscribeFunc func() error
+
+// CredentialsListener is an interface that defines the methods for a credentials listener.
+// It is used to receive updates when the credentials change.
+// The OnNext method is called when the credentials change.
+// The OnError method is called when an error occurs while requesting the credentials.
+type CredentialsListener interface {
+ OnNext(credentials Credentials)
+ OnError(err error)
+}
+
+// Credentials is an interface that defines the methods for credentials.
+// It is used to provide the credentials for authentication.
+type Credentials interface {
+ // BasicAuth returns the username and password for basic authentication.
+ BasicAuth() (username string, password string)
+ // RawCredentials returns the raw credentials as a string.
+ // This can be used to extract the username and password from the raw credentials or
+ // additional information if present in the token.
+ RawCredentials() string
+}
+
+type basicAuth struct {
+ username string
+ password string
+}
+
+// RawCredentials returns the raw credentials as a string.
+func (b *basicAuth) RawCredentials() string {
+ return b.username + ":" + b.password
+}
+
+// BasicAuth returns the username and password for basic authentication.
+func (b *basicAuth) BasicAuth() (username string, password string) {
+ return b.username, b.password
+}
+
+// NewBasicCredentials creates a new Credentials object from the given username and password.
+func NewBasicCredentials(username, password string) Credentials {
+ return &basicAuth{
+ username: username,
+ password: password,
+ }
+}
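
The interfaces above only define the contract; provider implementations live outside this package. A minimal sketch of a provider that always yields the same basic credentials, assuming the vendored `auth` package; the `staticProvider` type is illustrative, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/redis/go-redis/v9/auth"
)

// staticProvider always yields the same credentials and never pushes updates.
type staticProvider struct {
	creds auth.Credentials
}

// Subscribe satisfies auth.StreamingCredentialsProvider: it hands the listener
// the current credentials once and returns a no-op unsubscribe function.
func (p *staticProvider) Subscribe(listener auth.CredentialsListener) (auth.Credentials, auth.UnsubscribeFunc, error) {
	listener.OnNext(p.creds)
	return p.creds, func() error { return nil }, nil
}

var _ auth.StreamingCredentialsProvider = (*staticProvider)(nil)

func main() {
	p := &staticProvider{creds: auth.NewBasicCredentials("app", "s3cret")}
	user, pass := p.creds.BasicAuth()
	fmt.Println(user, pass)
}
```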
diff --git a/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go
new file mode 100644
index 0000000..40076a0
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go
@@ -0,0 +1,47 @@
+package auth
+
+// ReAuthCredentialsListener is a struct that implements the CredentialsListener interface.
+// It is used to re-authenticate the credentials when they are updated.
+// It contains:
+// - reAuth: a function that takes the new credentials and returns an error if any.
+// - onErr: a function that takes an error and handles it.
+type ReAuthCredentialsListener struct {
+ reAuth func(credentials Credentials) error
+ onErr func(err error)
+}
+
+// OnNext is called when the credentials are updated.
+// It calls the reAuth function with the new credentials.
+// If the reAuth function returns an error, it calls the onErr function with the error.
+func (c *ReAuthCredentialsListener) OnNext(credentials Credentials) {
+ if c.reAuth == nil {
+ return
+ }
+
+ err := c.reAuth(credentials)
+ if err != nil {
+ c.OnError(err)
+ }
+}
+
+// OnError is called when an error occurs.
+// It can be called from both the credentials provider and the reAuth function.
+func (c *ReAuthCredentialsListener) OnError(err error) {
+ if c.onErr == nil {
+ return
+ }
+
+ c.onErr(err)
+}
+
+// NewReAuthCredentialsListener creates a new ReAuthCredentialsListener.
+// Implements the auth.CredentialsListener interface.
+func NewReAuthCredentialsListener(reAuth func(credentials Credentials) error, onErr func(err error)) *ReAuthCredentialsListener {
+ return &ReAuthCredentialsListener{
+ reAuth: reAuth,
+ onErr: onErr,
+ }
+}
+
+// Ensure ReAuthCredentialsListener implements the CredentialsListener interface.
+var _ CredentialsListener = (*ReAuthCredentialsListener)(nil)
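
A small sketch of how the listener might be wired up; both callback bodies are placeholders rather than real re-authentication logic:

```go
package main

import (
	"log"

	"github.com/redis/go-redis/v9/auth"
)

func main() {
	// Both callbacks are placeholders; real code would re-issue AUTH on the
	// affected connections and surface failures to the caller.
	listener := auth.NewReAuthCredentialsListener(
		func(creds auth.Credentials) error {
			user, _ := creds.BasicAuth()
			log.Printf("re-authenticating as %s", user)
			return nil
		},
		func(err error) {
			log.Printf("re-auth failed: %v", err)
		},
	)

	// Feeding new credentials triggers the reAuth callback above.
	listener.OnNext(auth.NewBasicCredentials("app", "rotated-secret"))
}
```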
diff --git a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go
new file mode 100644
index 0000000..a215582
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go
@@ -0,0 +1,161 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type BitMapCmdable interface {
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
+ BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+ BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+}
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+ Unit string // BYTE(default) | BIT
+}
+
+const BitCountIndexByte string = "BYTE"
+const BitCountIndexBit string = "BIT"
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := make([]any, 2, 5)
+ args[0] = "bitcount"
+ args[1] = key
+ if bitCount != nil {
+ args = append(args, bitCount.Start, bitCount.End)
+ if bitCount.Unit != "" {
+ if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit {
+ cmd := NewIntCmd(ctx)
+ cmd.SetErr(errors.New("redis: invalid bitcount index"))
+ return cmd
+ }
+ args = append(args, bitCount.Unit)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+// BitPos uses the pre-Redis 7.0 form of the command: bitpos key bit start end.
+// If you need the `byte | bit` parameter, use `BitPosSpan`.
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitPosSpan supports the `byte | bit` parameter introduced in Redis 7.0.
+// By default the bitpos command interprets the `start-end` range in bytes,
+// i.e. it counts from start to end in bytes. Set "span" to choose how the
+// range is interpreted:
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitField accepts multiple values:
+// - BitField("set", "i1", "offset1", "value1","cmd2", "type2", "offset2", "value2")
+// - BitField([]string{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+// - BitField([]interface{}{"cmd1", "type1", "offset1", "value1","cmd2", "type2", "offset2", "value2"})
+func (c cmdable) BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "bitfield"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitFieldRO - Read-only variant of the BITFIELD command.
+// It is like the original BITFIELD but only accepts the GET subcommand and can safely be used in read-only replicas.
+// - BitFieldRO(ctx, key, "<encoding0>", "<offset0>", "<encoding1>", "<offset1>")
+func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "BITFIELD_RO"
+ args[1] = key
+ if len(values)%2 != 0 {
+ panic("BitFieldRO: invalid number of arguments, must be even")
+ }
+ for i := 0; i < len(values); i += 2 {
+ args = append(args, "GET", values[i], values[i+1])
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
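
A short usage sketch for the bitmap helpers above, assuming a local Redis 7.0+ server so the BIT unit is accepted; the key name and offsets are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Set two bits, then count them over a BIT-addressed range.
	rdb.SetBit(ctx, "flags", 3, 1)
	rdb.SetBit(ctx, "flags", 10, 1)

	n, err := rdb.BitCount(ctx, "flags", &redis.BitCount{
		Start: 0,
		End:   15,
		Unit:  redis.BitCountIndexBit,
	}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2
}
```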
diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
new file mode 100644
index 0000000..4857b01
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
@@ -0,0 +1,199 @@
+package redis
+
+import "context"
+
+type ClusterCmdable interface {
+ ClusterMyShardID(ctx context.Context) *StringCmd
+ ClusterMyID(ctx context.Context) *StringCmd
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myshardid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMyID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterShards(ctx context.Context) *ClusterShardsCmd {
+ cmd := NewClusterShardsCmd(ctx, "cluster", "shards")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterLinks(ctx context.Context) *ClusterLinksCmd {
+ cmd := NewClusterLinksCmd(ctx, "cluster", "links")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
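
The slot-range helpers simply expand `min..max` into an explicit slot list before delegating to CLUSTER ADDSLOTS / DELSLOTS. A hedged sketch against a single cluster node; the address and slot range are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:7000"})

	// Assign a contiguous slot range to this node, then inspect cluster state.
	if err := rdb.ClusterAddSlotsRange(ctx, 0, 5460).Err(); err != nil {
		panic(err)
	}
	info, err := rdb.ClusterInfo(ctx).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(info)
}
```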
diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go
new file mode 100644
index 0000000..5fa347f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/command.go
@@ -0,0 +1,5622 @@
+package redis
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type Cmder interface {
+ // command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
+ Name() string
+
+ // full command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+ FullName() string
+
+ // all args of the command.
+ // e.g. "set k v ex 10" -> "[set k v ex 10]".
+ Args() []interface{}
+
+ // format request and response string.
+ // e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+ String() string
+
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+ readRawReply(rd *proto.Reader) error
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+ return 1
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return util.BytesToString(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+ rawVal interface{}
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) {
+ cmd.rawVal, err = rd.ReadReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
+}
+
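
Cmd is the generic command type returned by `Do`; the typed helpers above (Text, Int64, Bool, the slice variants, ...) convert whatever RESP value came back. A minimal sketch, assuming a local Redis instance and placeholder key names:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.Set(ctx, "counter", 41, 0)
	rdb.Incr(ctx, "counter")

	// Do returns a *Cmd; Int64 coerces the bulk-string reply via strconv.
	n, err := rdb.Do(ctx, "get", "counter").Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 42
}
```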
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results into a destination struct. The returned values are
+// matched to the struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
+}
+
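
A short sketch of the HMGET path described in Scan above: the requested field names are paired with the returned values and mapped onto struct fields via `redis:"..."` tags. The key, fields, and `Account` type are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// Account is a hypothetical destination type; the tags name the hash fields.
type Account struct {
	Name string `redis:"name"`
	Plan string `redis:"plan"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.HSet(ctx, "account:1", "name", "Ada", "plan", "pro")

	// HMGET returns a *SliceCmd; Scan pairs the requested fields with the
	// returned values and fills the tagged struct fields.
	var acc Account
	if err := rdb.HMGet(ctx, "account:1", "name", "plan").Scan(&acc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", acc)
}
```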
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
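
The -2/-1 sentinels are stored without scaling, so callers of TTL can compare the returned Duration directly. A brief sketch; the key name is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	d, err := rdb.TTL(ctx, "session:123").Result()
	if err != nil {
		panic(err)
	}
	switch {
	case d == -2:
		fmt.Println("key does not exist")
	case d == -1:
		fmt.Println("key exists but has no expiration")
	default:
		fmt.Println("expires in", d)
	}
}
```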
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
+
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ err = nil
+ }
+ return err
+}
+
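
That nil-to-false conversion is what lets `SET ... NX`-style commands report acquisition as a plain bool. A small sketch of a lock-style SetNX call; the key, value, and TTL are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// SetNX returns a *BoolCmd: false when the key already exists (the nil
	// reply from SET ... NX), true when this call created it.
	ok, err := rdb.SetNX(ctx, "lock:job", "owner-1", 30*time.Second).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("acquired:", ok)
}
```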
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
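
A brief sketch of the typed accessors on StringCmd, e.g. round-tripping an RFC 3339 timestamp through SET/GET; the key name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.Set(ctx, "deployed_at", time.Now().Format(time.RFC3339Nano), 0)

	// Time parses the stored string with time.RFC3339Nano, as defined above.
	t, err := rdb.Get(ctx, "deployed_at").Time()
	if err != nil {
		panic(err)
	}
	fmt.Println("deployed", time.Since(t), "ago")
}
```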
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands return key/value pairs in one of two RESP layouts.
+//
+// As an array of two-element [key, value] entries:
+//  1) 1) "one"
+//     2) (double) 1
+//  2) 1) "two"
+//     2) (double) 2
+//
+// Or as a flat array of alternating keys and values:
+//  1) "one"
+//  2) (double) 1
+//  3) "two"
+//  4) (double) 2
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0, there is nothing further to read.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringStringCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*MapStringStringCmd)(nil)
+
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringIntCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*MapStringIntCmd)(nil)
+
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringIntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+// ------------------------------------------------------------------------------
+type MapStringSliceInterfaceCmd struct {
+ baseCmd
+ val map[string][]interface{}
+}
+
+func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+ return &MapStringSliceInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringSliceInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ readType, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string][]interface{})
+
+ switch readType {
+ case proto.RespMap:
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[k] = make([]interface{}, nn)
+ for j := 0; j < nn; j++ {
+ value, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[k][j] = value
+ }
+ }
+ case proto.RespArray:
+ // RESP2 response
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < n; i++ {
+ // Each entry in this array is itself an array with key details
+ itemLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = make([]interface{}, 0, itemLen-1)
+ for j := 1; j < itemLen; j++ {
+ // Read the inner array for timestamp-value pairs
+ data, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = append(cmd.val[key], data)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ }
+
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
+}
+
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
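
These helpers decode stream entries into XMessage values. A minimal sketch that appends an entry and reads it back; the stream name and field values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Append an entry, then read it back as the []XMessage decoded above.
	rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "events",
		Values: map[string]interface{}{"kind": "signup", "user": "42"},
	})

	msgs, err := rdb.XRange(ctx, "events", "-", "+").Result()
	if err != nil {
		panic(err)
	}
	for _, m := range msgs {
		fmt.Println(m.ID, m.Values)
	}
}
```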
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ // Lag represents the number of pending messages in the stream not yet delivered
+ // to this consumer group; it is -1 when the lag cannot be determined.
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or NULL (Nil) when that number can't be determined.
+ // In that case, we return -1.
+ if err != nil && err != Nil {
+ return err
+ } else if err == Nil {
+ group.Lag = -1
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or NULL (Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < nn; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.UnixMilli(seen)
+ case "active-time":
+ active, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.ActiveTime = time.UnixMilli(active)
+ case "pel-count":
+ c.PelCount, err = rd.ReadInt()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM FULL reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0, there is nothing more to read.
+ if n == 0 {
+ cmd.val = make([]Z, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
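+
+// Illustrative note (not part of the library): the loop above handles both wire
+// encodings of a sorted-set-with-scores reply. A RESP3 server sends an array of
+// [member, score] pairs (n pairs), while a RESP2 server sends a flat array of
+// alternating members and scores (n elements, n/2 pairs). For example:
+//
+//	RESP3: [["a", 1], ["b", 2]]   -> []Z{{Member: "a", Score: 1}, {Member: "b", Score: 2}}
+//	RESP2: ["a", "1", "b", "2"]   -> same result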
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return err
+ }
+ cmd.val = &ZWithKey{}
+
+ if cmd.val.Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
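+
+// Illustrative usage sketch (not part of the library), assuming the usual ScanIterator
+// API (Next/Val/Err, defined elsewhere in the package) and a client value rdb whose
+// Scan method returns a *ScanCmd:
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		fmt.Println("key:", iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		panic(err)
+//	}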
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+ NetworkingMetadata map[string]string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterSlot, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ n, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n < 2 {
+ return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ }
+
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ // Every element after the first two (start and end) describes a node.
+ nodes := make([]ClusterNode, n-2)
+
+ for j := 0; j < len(nodes); j++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 2 || nn > 4 {
+ return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if nn >= 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nodes[j].ID = id
+ }
+
+ if nn >= 4 {
+ metadataLength, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ networkingMetadata := make(map[string]string, metadataLength)
+
+ for i := 0; i < metadataLength; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ networkingMetadata[key] = value
+ }
+
+ nodes[j].NetworkingMetadata = networkingMetadata
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+
+ return nil
+}
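+
+// Illustrative note (not part of the library): the parser above expects each
+// CLUSTER SLOTS element to look roughly like
+//
+//	[start, end, [ip, port, id?, metadata?], [ip, port, id?, metadata?], ...]
+//
+// so with n elements in a slot entry, n-2 of them describe nodes, and each node
+// entry carries 2, 3, or 4 fields depending on the server version.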
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+
+ // withLen counts how many of WithCoord, WithDist, and WithGeoHash are set;
+ // it determines the number of extra fields in each reply element.
+ withLen int
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ q.withLen++
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ q.withLen++
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ q.withLen++
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
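+
+// Illustrative note (not part of the library): geoLocationArgs only appends the
+// query options; the caller supplies the leading command words. For a hypothetical
+// query it would produce, roughly:
+//
+//	q := &GeoRadiusQuery{Radius: 200, Unit: "km", WithCoord: true, Count: 10, Sort: "ASC"}
+//	args := geoLocationArgs(q, "georadius", "Sicily", 15, 37)
+//	// -> ["georadius", "Sicily", 15, 37, 200, "km", "withcoord", "count", 10, "ASC"]
+//	// and q.withLen is now 1, which readReply uses to size each reply element.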
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.locations = make([]GeoLocation, n)
+
+ for i := 0; i < len(cmd.locations); i++ {
+ // the reply element is just the member name
+ if cmd.q.withLen == 0 {
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // the fixed-size array holds the name plus the requested with* fields
+ if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+ return err
+ }
+
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.q.WithDist {
+ if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithGeoHash {
+ if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+ Member string
+
+ // Longitude and Latitude when using the FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using ByRadius option.
+ // Can use m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width and unit when using ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
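+
+// Illustrative note (not part of the library): geoSearchArgs translates a
+// GeoSearchQuery into command arguments. For a hypothetical query:
+//
+//	q := &GeoSearchQuery{Member: "Palermo", Radius: 100, RadiusUnit: "km", Sort: "ASC", Count: 5}
+//	args := geoSearchArgs(q, []interface{}{"geosearch", "Sicily"})
+//	// -> ["geosearch", "Sicily", "frommember", "Palermo", "byradius", 100, "km", "ASC", "count", 5]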
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ loc.Longitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*GeoPos, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ err = rd.ReadFixedArrayLen(2)
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return err
+ }
+
+ longitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ latitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+ const numArgRedis7 = 10
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string]*CommandInfo, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch nn {
+ case numArgRedis5, numArgRedis6, numArgRedis7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+ }
+
+ cmdInfo := &CommandInfo{}
+ if cmdInfo.Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ arity, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Arity = int8(arity)
+
+ flagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Flags = make([]string, flagLen)
+ for f := 0; f < len(cmdInfo.Flags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.Flags[f] = ""
+ case err != nil:
+ return err
+ default:
+ if !cmdInfo.ReadOnly && s == "readonly" {
+ cmdInfo.ReadOnly = true
+ }
+ cmdInfo.Flags[f] = s
+ }
+ }
+
+ firstKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.StepCount = int8(stepCount)
+
+ if nn >= numArgRedis6 {
+ aclFlagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.ACLFlags = make([]string, aclFlagLen)
+ for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.ACLFlags[f] = ""
+ case err != nil:
+ return err
+ default:
+ cmdInfo.ACLFlags[f] = s
+ }
+ }
+ }
+
+ if nn >= numArgRedis7 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ cmd.val[cmdInfo.Name] = cmdInfo
+ }
+
+ return nil
+}
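+
+// Illustrative note (not part of the library): COMMAND replies grow with the server
+// version, which is why the parser above accepts 6, 7, or 10 elements per entry:
+//
+//	Redis 5: name, arity, flags, first key, last key, step            (6)
+//	Redis 6: ... plus ACL categories                                  (7)
+//	Redis 7: ... plus tips, key specs, subcommands (discarded above)  (10)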
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are also optional fields emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]SlowLog, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 4 {
+ return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+ }
+
+ if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ createdAt, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Time = time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if cmdLen < 1 {
+ return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ }
+
+ cmd.val[i].Args = make([]string, cmdLen)
+ for f := 0; f < len(cmd.val[i].Args); f++ {
+ cmd.val[i].Args[f], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if nn >= 5 {
+ if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ if nn >= 6 {
+ if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
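+
+// Illustrative note (not part of the library): each SLOWLOG GET entry parsed above is
+// an array of at least four elements, with two optional trailers on newer servers:
+//
+//	[id, unix-timestamp, duration-microseconds, [cmd, args...], client-addr?, client-name?]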
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceCmd struct {
+ baseCmd
+
+ val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+ return &MapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ cmd.val[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ cmd.val[k] = err
+ continue
+ }
+ return err
+ }
+ cmd.val[k] = v
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringStringSliceCmd struct {
+ baseCmd
+
+ val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+ return &MapStringStringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]string, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]string, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ v, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+// -----------------------------------------------------------------------
+
+// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}.
+type MapMapStringInterfaceCmd struct {
+ baseCmd
+ val map[string]interface{}
+}
+
+func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd {
+ return &MapMapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapMapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3
+func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ resultMap := map[string]interface{}{}
+
+ switch midResponse := data.(type) {
+ case map[interface{}]interface{}: // resp3 will return map
+ for k, v := range midResponse {
+ stringKey, ok := k.(string)
+ if !ok {
+ return fmt.Errorf("redis: invalid map key %#v", k)
+ }
+ resultMap[stringKey] = v
+ }
+ case []interface{}: // resp2 will return array of arrays
+ n := len(midResponse)
+ for i := 0; i < n; i++ {
+ finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map
+ if !ok {
+ return fmt.Errorf("redis: unexpected response %#v", data)
+ }
+ m := len(finalArr)
+ if m%2 != 0 { // since this should be a map, the element count must be even (key/value pairs)
+ return fmt.Errorf("redis: unexpected response %#v", data)
+ }
+
+ for j := 0; j < m; j += 2 {
+ stringKey, ok := finalArr[j].(string) // even index: key
+ if !ok {
+ return fmt.Errorf("redis: invalid map key %#v", finalArr[j])
+ }
+ resultMap[stringKey] = finalArr[j+1] // odd index: value
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected response %#v", data)
+ }
+
+ cmd.val = resultMap
+ return nil
+}
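+
+// Illustrative note (not part of the library): the two reply shapes handled above
+// encode the same data, e.g. for a hypothetical reply:
+//
+//	RESP3: {"k1": v1, "k2": v2}          -> keys converted to strings, values copied as-is
+//	RESP2: [["k1", v1, "k2", v2], ...]   -> each inner flat array folded into the same map
+//
+// Either way the result ends up in a single map[string]interface{}.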
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceSliceCmd struct {
+ baseCmd
+
+ val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+ return &MapStringInterfaceSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]interface{}, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err != Nil {
+ return err
+ }
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValuesCmd struct {
+ baseCmd
+
+ key string
+ val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+ return &KeyValuesCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceWithKeyCmd struct {
+ baseCmd
+
+ key string
+ val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+ return &ZSliceWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type Function struct {
+ Name string
+ Description string
+ Flags []string
+}
+
+type Library struct {
+ Name string
+ Engine string
+ Functions []Function
+ Code string
+}
+
+type FunctionListCmd struct {
+ baseCmd
+
+ val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+ return &FunctionListCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+ cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionListCmd) Val() []Library {
+ return cmd.val
+}
+
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionListCmd) First() (*Library, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ if len(cmd.val) > 0 {
+ return &cmd.val[0], nil
+ }
+ return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ libraries := make([]Library, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ library := Library{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "library_name":
+ library.Name, err = rd.ReadString()
+ case "engine":
+ library.Engine, err = rd.ReadString()
+ case "functions":
+ library.Functions, err = cmd.readFunctions(rd)
+ case "library_code":
+ library.Code, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ libraries[i] = library
+ }
+ cmd.val = libraries
+ return nil
+}
+
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ functions := make([]Function, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function := Function{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ if function.Name, err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ case "description":
+ if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+ return nil, err
+ }
+ case "flags":
+ // resp set
+ nx, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function.Flags = make([]string, nx)
+ for j := 0; j < nx; j++ {
+ if function.Flags[j], err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+ }
+
+ functions[i] = function
+ }
+ return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on the
+// server and the available engines:
+//   - Engines:
+//     statistics about each engine, such as the number of functions and libraries.
+//   - RunningScript:
+//     the script currently running on the shard we're connected to.
+//     For Redis Enterprise and Redis Cloud, this represents the function with the
+//     longest running time across all running functions on all shards.
+//   - RunningScripts:
+//     all scripts currently running in a Redis Enterprise clustered database.
+//     Only available on Redis Enterprise.
+type FunctionStats struct {
+ Engines []Engine
+ isRunning bool
+ rs RunningScript
+ allrs []RunningScript
+}
+
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
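+
+// Illustrative usage sketch (not part of the library), assuming the client exposes a
+// FunctionStats command returning a *FunctionStatsCmd (that method is not shown in
+// this file):
+//
+//	stats, err := rdb.FunctionStats(ctx).Result()
+//	if err == nil {
+//		if rs, ok := stats.RunningScript(); ok {
+//			fmt.Println("running:", rs.Name, "for", rs.Duration)
+//		}
+//		for _, e := range stats.Engines {
+//			fmt.Println(e.Language, e.LibrariesCount, e.FunctionsCount)
+//		}
+//	}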
+
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+ // only set when WithMatchLen is true
+ MatchLen int64
+}
+
+type LCSPosition struct {
+ Start int64
+ End int64
+}
+
+type LCSCmd struct {
+ baseCmd
+
+ // readType selects how the reply is parsed:
+ // 1: plain match string
+ // 2: match length only (LEN)
+ // 3: full match info (IDX), decoded into LCSMatch
+ readType uint8
+ val *LCSMatch
+}
+
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
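+
+// Illustrative note (not part of the library): NewLCSCmd builds the LCS argument list
+// from the query. For a hypothetical query:
+//
+//	q := &LCSQuery{Key1: "a", Key2: "b", Idx: true, MinMatchLen: 4, WithMatchLen: true}
+//	cmd := NewLCSCmd(ctx, q)
+//	// args -> ["lcs", "a", "b", "idx", "minmatchlen", 4, "withmatchlen"], readType = 3,
+//	// so readReply parses the full matches/len map into LCSMatch.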
+
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
+
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ------------------------------------------------------------------------------------------------------------------
+
+type SlotRange struct {
+ Start int64
+ End int64
+}
+
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
+}
+
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+type ClusterShardsCmd struct {
+ baseCmd
+
+ val []ClusterShard
+}
+
+var _ Cmder = (*ClusterShardsCmd)(nil)
+
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+// -----------------------------------------
+
+type RankScore struct {
+ Rank int64
+ Score float64
+}
+
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is the set of redis-server client flags, copied from redis/src/server.h (redis 7.0).
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance doesn't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a write handler is not yet installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling
+ a command. Usually this will be marked only during call();
+ however, blocked clients might keep this flag until they
+ try to reprocess the command. */
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands and writing entire reply. */
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
+
+// ClientInfo describes a redis-server client (as reported by CLIENT INFO), not a go-redis *Client.
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ Watch int // redis version 7.4 RC1, number of keys this client is currently watching.
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory used by buffered multi commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ IoThread int // io-thread id
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
+
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
+}
+
+// fmt.Sscanf() cannot handle null values
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ if val == "N" {
+ break
+ }
+
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "watch":
+ info.Watch, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ case "io-thread":
+ info.IoThread, err = strconv.Atoi(val)
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
+}
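+
+// Illustrative only: parseClientInfo expects the space-separated key=value format
+// produced by CLIENT INFO / CLIENT LIST, for example (fields abbreviated):
+//
+//	id=3 addr=127.0.0.1:51234 laddr=127.0.0.1:6379 fd=8 name= age=12 idle=0 flags=N db=0 cmd=client|info user=default resp=3
+//
+// A minimal sketch of feeding such a line through the parser (error handling omitted):
+//
+//	info, _ := parseClientInfo("id=3 addr=127.0.0.1:51234 fd=8 name= age=12 idle=0 flags=N db=0")
+//	fmt.Println(info.ID, info.Addr, info.DB)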
+
+// -------------------------------------------
+
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
+}
+
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
+}
+
+var _ Cmder = (*ACLLogCmd)(nil)
+
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// LibraryInfo holds the library info.
+type LibraryInfo struct {
+ LibName *string
+ LibVer *string
+}
+
+// WithLibraryName returns a valid LibraryInfo with library name only.
+func WithLibraryName(libName string) LibraryInfo {
+ return LibraryInfo{LibName: &libName}
+}
+
+// WithLibraryVersion returns a valid LibraryInfo with library version only.
+func WithLibraryVersion(libVer string) LibraryInfo {
+ return LibraryInfo{LibVer: &libVer}
+}
+
+// -------------------------------------------
+
+type InfoCmd struct {
+ baseCmd
+ val map[string]map[string]string
+}
+
+var _ Cmder = (*InfoCmd)(nil)
+
+func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+ return &InfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *InfoCmd) Val() map[string]map[string]string {
+ return cmd.val
+}
+
+func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *InfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+ val, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ section := ""
+ scanner := bufio.NewScanner(strings.NewReader(val))
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ if cmd.val == nil {
+ cmd.val = make(map[string]map[string]string)
+ }
+ section = strings.TrimPrefix(line, "# ")
+ cmd.val[section] = make(map[string]string)
+ } else if line != "" {
+ if section == "Modules" {
+ moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
+ kv := moduleRe.FindStringSubmatch(line)
+ if len(kv) == 3 {
+ cmd.val[section][kv[1]] = kv[2]
+ }
+ } else {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) == 2 {
+ cmd.val[section][kv[0]] = kv[1]
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (cmd *InfoCmd) Item(section, key string) string {
+ if cmd.val == nil {
+ return ""
+ } else if cmd.val[section] == nil {
+ return ""
+ } else {
+ return cmd.val[section][key]
+ }
+}
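+
+// Illustrative only: InfoCmd parses the raw INFO text into sections keyed by their
+// "# Section" headers. Assuming an initialized *redis.Client rdb and a context ctx:
+//
+//	infoCmd := rdb.InfoMap(ctx, "server")
+//	if infoCmd.Err() == nil {
+//		version := infoCmd.Item("Server", "redis_version")
+//		_ = version
+//	}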
+
+type MonitorStatus int
+
+const (
+ monitorStatusIdle MonitorStatus = iota
+ monitorStatusStart
+ monitorStatusStop
+)
+
+type MonitorCmd struct {
+ baseCmd
+ ch chan string
+ status MonitorStatus
+ mu sync.Mutex
+}
+
+func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+ return &MonitorCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"monitor"},
+ },
+ ch: ch,
+ status: monitorStatusIdle,
+ mu: sync.Mutex{},
+ }
+}
+
+func (cmd *MonitorCmd) String() string {
+ return cmdString(cmd, nil)
+}
+
+func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+ ctx, cancel := context.WithCancel(cmd.ctx)
+ go func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := cmd.readMonitor(rd, cancel)
+ if err != nil {
+ cmd.err = err
+ return
+ }
+ }
+ }
+ }(ctx)
+ return nil
+}
+
+func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+ for {
+ cmd.mu.Lock()
+ st := cmd.status
+ pk, _ := rd.Peek(1)
+ cmd.mu.Unlock()
+ if len(pk) != 0 && st == monitorStatusStart {
+ cmd.mu.Lock()
+ line, err := rd.ReadString()
+ cmd.mu.Unlock()
+ if err != nil {
+ return err
+ }
+ cmd.ch <- line
+ }
+ if st == monitorStatusStop {
+ cancel()
+ break
+ }
+ }
+ return nil
+}
+
+func (cmd *MonitorCmd) Start() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStart
+}
+
+func (cmd *MonitorCmd) Stop() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStop
+}
diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go
new file mode 100644
index 0000000..2713232
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/commands.go
@@ -0,0 +1,732 @@
+package redis
+
+import (
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
+ case nil:
+ return dst
+ default:
+ // scan struct field
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+ // error: arg is not a valid object
+ return dst
+ }
+ v = v.Elem()
+ }
+
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
+
+ return append(dst, arg)
+ }
+}
+
+// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
+
+ field := v.Field(i)
+
+ // skip the field if omitempty is set and the value is empty
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
+
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
+ }
+
+ return dst
+}
+
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ case reflect.Struct:
+ if v.Type() == reflect.TypeOf(time.Time{}) {
+ return v.IsZero()
+ }
+ // Only supports the struct time.Time,
+ // subsequent iterations will follow the func Scan support decoder.
+ }
+ return false
+}
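+
+// Illustrative only: appendArg/appendStructField let struct values be expanded into
+// field/value pairs via `redis` struct tags, e.g. when passed to HSET. A sketch
+// (the Sample type is hypothetical, not part of this package):
+//
+//	type Sample struct {
+//		Name  string `redis:"name"`
+//		Score int    `redis:"score,omitempty"`
+//		Skip  string `redis:"-"`
+//	}
+//
+//	rdb.HSet(ctx, "sample:1", Sample{Name: "a", Score: 0}) // Score omitted: omitempty + zero value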
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ ACLCmdable
+ BitMapCmdable
+ ClusterCmdable
+ GenericCmdable
+ GeoCmdable
+ HashCmdable
+ HyperLogLogCmdable
+ ListCmdable
+ ProbabilisticCmdable
+ PubSubCmdable
+ ScriptingFunctionsCmdable
+ SearchCmdable
+ SetCmdable
+ SortedSetCmdable
+ StringCmdable
+ StreamCmdable
+ TimeseriesCmdable
+ JSONCmdable
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+ ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+ err := info.Validate()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ var cmd *StatusCmd
+ if info.LibName != nil {
+ libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version()))
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+ } else {
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Validate checks if only one field in the struct is non-nil.
+func (info LibraryInfo) Validate() error {
+ if info.LibName != nil && info.LibVer != nil {
+ return errors.New("both LibName and LibVer cannot be set at the same time")
+ }
+ if info.LibName == nil && info.LibVer == nil {
+ return errors.New("at least one of LibName and LibVer should be set")
+ }
+ return nil
+}
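+
+// Illustrative only: a LibraryInfo is built with exactly one of the With* helpers,
+// since Validate rejects both-set and none-set. Assuming an initialized *redis.Client
+// rdb and a context ctx:
+//
+//	conn := rdb.Conn()
+//	defer conn.Close()
+//	conn.ClientSetInfo(ctx, redis.WithLibraryName("mylib"))
+//	conn.ClientSetInfo(ctx, redis.WithLibraryVersion("0.1.0"))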
+
+// Hello performs the HELLO handshake and sets the RESP protocol version used.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string,
+) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
+ }
+ if clientName != "" {
+ args = append(args, "setname", clientName)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
+}
+
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter uses the new filter-based syntax, while ClientKill uses the old addr:port form.
+//
+// CLIENT KILL <filter> <value> ... <filter> <value>
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
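+
+// Illustrative only: ClientKillByFilter takes filter/value pairs exactly as CLIENT KILL
+// expects them (assumes an initialized *redis.Client rdb and a context ctx):
+//
+//	killed, err := rdb.ClientKillByFilter(ctx, "TYPE", "pubsub").Result()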
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnpause(ctx context.Context) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "unpause")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientInfo(ctx context.Context) *ClientInfoCmd {
+ cmd := NewClientInfoCmd(ctx, "client", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------------------------
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, sections ...string) *StringCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) InfoMap(ctx context.Context, sections ...string) *InfoCmd {
+ args := make([]interface{}, 1+len(sections))
+ args[0] = "info"
+ for i, section := range sections {
+ args[i+1] = section
+ }
+ cmd := NewInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// ModuleLoadexConfig specifies the arguments for the Redis MODULE LOADEX command:
+// `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]`
+type ModuleLoadexConfig struct {
+ Path string
+ Conf map[string]interface{}
+ Args []interface{}
+}
+
+func (c *ModuleLoadexConfig) toArgs() []interface{} {
+ args := make([]interface{}, 3, 3+len(c.Conf)*3+len(c.Args)*2)
+ args[0] = "MODULE"
+ args[1] = "LOADEX"
+ args[2] = c.Path
+ for k, v := range c.Conf {
+ args = append(args, "CONFIG", k, v)
+ }
+ for _, arg := range c.Args {
+ args = append(args, "ARGS", arg)
+ }
+ return args
+}
+
+// ModuleLoadex Redis `MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]` command.
+func (c cmdable) ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd {
+ cmd := NewStringCmd(ctx, conf.toArgs()...)
+ _ = c(ctx, cmd)
+ return cmd
+}
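+
+// Illustrative only: a ModuleLoadexConfig maps directly onto the MODULE LOADEX arguments
+// (the module path and config names below are placeholders):
+//
+//	cfg := &redis.ModuleLoadexConfig{
+//		Path: "/path/to/mymodule.so",
+//		Conf: map[string]interface{}{"setting": "value"},
+//		Args: []interface{}{"arg1", "arg2"},
+//	}
+//	err := rdb.ModuleLoadex(ctx, cfg).Err()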
+
+/*
+Monitor - represents a Redis MONITOR command, allowing the user to capture
+and process all commands sent to a Redis server. This mimics the behavior of
+MONITOR in the redis-cli.
+
+Notes:
+- Using MONITOR ties up the connection for itself; it needs a dedicated connection.
+- The user should create a channel of type string to receive the captured commands.
+- Monitoring runs concurrently in the background; trigger it via the Start and Stop functions.
+See further: Redis MONITOR command: https://redis.io/commands/monitor
+*/
+func (c cmdable) Monitor(ctx context.Context, ch chan string) *MonitorCmd {
+ cmd := newMonitorCmd(ctx, ch)
+ _ = c(ctx, cmd)
+ return cmd
+}
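+
+// Illustrative usage sketch (assumes an initialized *redis.Client rdb and a context ctx):
+//
+//	ch := make(chan string)
+//	mon := rdb.Monitor(ctx, ch)
+//	mon.Start()
+//	go func() {
+//		for line := range ch {
+//			fmt.Println(line) // each captured command, as MONITOR reports it
+//		}
+//	}()
+//	// ... later
+//	mon.Stop()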
diff --git a/vendor/github.com/redis/go-redis/v9/doc.go b/vendor/github.com/redis/go-redis/v9/doc.go
new file mode 100644
index 0000000..5526253
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/redis/go-redis/v9/docker-compose.yml b/vendor/github.com/redis/go-redis/v9/docker-compose.yml
new file mode 100644
index 0000000..3d4347b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/docker-compose.yml
@@ -0,0 +1,106 @@
+---
+
+services:
+ redis:
+ image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2}
+ platform: linux/amd64
+ container_name: redis-standalone
+ environment:
+ - TLS_ENABLED=yes
+ - REDIS_CLUSTER=no
+ - PORT=6379
+ - TLS_PORT=6666
+ command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""}
+ ports:
+ - 6379:6379
+ - 6666:6666 # TLS port
+ volumes:
+ - "./dockers/standalone:/redis/work"
+ profiles:
+ - standalone
+ - sentinel
+ - all-stack
+ - all
+
+ osscluster:
+ image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2}
+ platform: linux/amd64
+ container_name: redis-osscluster
+ environment:
+ - NODES=6
+ - PORT=16600
+ command: "--cluster-enabled yes"
+ ports:
+ - "16600-16605:16600-16605"
+ volumes:
+ - "./dockers/osscluster:/redis/work"
+ profiles:
+ - cluster
+ - all-stack
+ - all
+
+ sentinel-cluster:
+ image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2}
+ platform: linux/amd64
+ container_name: redis-sentinel-cluster
+ network_mode: "host"
+ environment:
+ - NODES=3
+ - TLS_ENABLED=yes
+ - REDIS_CLUSTER=no
+ - PORT=9121
+ command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""}
+ #ports:
+ # - "9121-9123:9121-9123"
+ volumes:
+ - "./dockers/sentinel-cluster:/redis/work"
+ profiles:
+ - sentinel
+ - all-stack
+ - all
+
+ sentinel:
+ image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2}
+ platform: linux/amd64
+ container_name: redis-sentinel
+ depends_on:
+ - sentinel-cluster
+ environment:
+ - NODES=3
+ - REDIS_CLUSTER=no
+ - PORT=26379
+ command: ${REDIS_EXTRA_ARGS:---sentinel}
+ network_mode: "host"
+ #ports:
+ # - 26379:26379
+ # - 26380:26380
+ # - 26381:26381
+ volumes:
+ - "./dockers/sentinel.conf:/redis/config-default/redis.conf"
+ - "./dockers/sentinel:/redis/work"
+ profiles:
+ - sentinel
+ - all-stack
+ - all
+
+ ring-cluster:
+ image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2}
+ platform: linux/amd64
+ container_name: redis-ring-cluster
+ environment:
+ - NODES=3
+ - TLS_ENABLED=yes
+ - REDIS_CLUSTER=no
+ - PORT=6390
+ command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""}
+ ports:
+ - 6390:6390
+ - 6391:6391
+ - 6392:6392
+ volumes:
+ - "./dockers/ring:/redis/work"
+ profiles:
+ - ring
+ - cluster
+ - all-stack
+ - all
diff --git a/vendor/github.com/redis/go-redis/v9/error.go b/vendor/github.com/redis/go-redis/v9/error.go
new file mode 100644
index 0000000..8c81196
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/error.go
@@ -0,0 +1,181 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strings"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// ErrClosed is returned when any operation is performed on a closed client.
+var ErrClosed = pool.ErrClosed
+
+// ErrPoolExhausted is returned from a pool connection method
+// when the maximum number of database connections in the pool has been reached.
+var ErrPoolExhausted = pool.ErrPoolExhausted
+
+// ErrPoolTimeout is returned when the client timed out waiting to get a connection from the connection pool.
+var ErrPoolTimeout = pool.ErrPoolTimeout
+
+// HasErrorPrefix checks if err is a Redis error and its message starts with the given prefix.
+func HasErrorPrefix(err error, prefix string) bool {
+ var rErr Error
+ if !errors.As(err, &rErr) {
+ return false
+ }
+ msg := rErr.Error()
+ msg = strings.TrimPrefix(msg, "ERR ") // KVRocks adds such prefix
+ return strings.HasPrefix(msg, prefix)
+}
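+
+// Illustrative only, assuming err came back from a command executed against a client:
+//
+//	if HasErrorPrefix(err, "NOPERM") {
+//		// the ACL rules for the current user forbid this command
+//	}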
+
+type Error interface {
+ error
+
+ // RedisError is a no-op function but
+ // serves to distinguish types that are Redis
+ // errors from ordinary errors: a type is a
+ // Redis error if it has a RedisError method.
+ RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func isContextError(err error) bool {
+ switch err {
+ case context.Canceled, context.DeadlineExceeded:
+ return true
+ default:
+ return false
+ }
+}
+
+func shouldRetry(err error, retryTimeout bool) bool {
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return true
+ case nil, context.Canceled, context.DeadlineExceeded:
+ return false
+ case pool.ErrPoolTimeout:
+ // connection pool timeout, increase retries. #3289
+ return true
+ }
+
+ if v, ok := err.(timeoutError); ok {
+ if v.Timeout() {
+ return retryTimeout
+ }
+ return true
+ }
+
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
+ if strings.HasPrefix(s, "MASTERDOWN ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ if strings.HasPrefix(s, "TRYAGAIN ") {
+ return true
+ }
+
+ return false
+}
+
+func isRedisError(err error) bool {
+ _, ok := err.(proto.RedisError)
+ return ok
+}
+
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+ switch err {
+ case nil:
+ return false
+ case context.Canceled, context.DeadlineExceeded:
+ return true
+ }
+
+ if isRedisError(err) {
+ switch {
+ case isReadOnlyError(err):
+ // Close connections in read only state in case domain addr is used
+ // and domain resolves to a different Redis Server. See #790.
+ return true
+ case isMovedSameConnAddr(err, addr):
+ // Close connections when we are asked to move to the same addr
+ // of the connection. Force a DNS resolution when all connections
+ // of the pool are recycled
+ return true
+ default:
+ return false
+ }
+ }
+
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return false
+ }
+ }
+
+ return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if !isRedisError(err) {
+ return
+ }
+
+ s := err.Error()
+ switch {
+ case strings.HasPrefix(s, "MOVED "):
+ moved = true
+ case strings.HasPrefix(s, "ASK "):
+ ask = true
+ default:
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+
+ addr = s[ind+1:]
+ addr = internal.GetAddr(addr)
+ return
+}
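+
+// Illustrative only: cluster redirection errors have the textual form
+//
+//	MOVED 3999 127.0.0.1:6381
+//	ASK 3999 127.0.0.1:6381
+//
+// so isMovedError reports (moved=true, addr="127.0.0.1:6381") for the first
+// and (ask=true, addr="127.0.0.1:6381") for the second.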
+
+func isLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+ return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+func isMovedSameConnAddr(err error, addr string) bool {
+ redisError := err.Error()
+ if !strings.HasPrefix(redisError, "MOVED ") {
+ return false
+ }
+ return strings.HasSuffix(redisError, " "+addr)
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+ Timeout() bool
+}
diff --git a/vendor/github.com/redis/go-redis/v9/generic_commands.go b/vendor/github.com/redis/go-redis/v9/generic_commands.go
new file mode 100644
index 0000000..dc6c3fe
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/generic_commands.go
@@ -0,0 +1,384 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type GenericCmdable interface {
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireTime(ctx context.Context, key string) *DurationCmd
+ ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectFreq(ctx context.Context, key string) *IntCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PExpireTime(ctx context.Context, key string) *DurationCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+ ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "expire"
+ args[1] = key
+ args[2] = formatSec(ctx, expiration)
+ if mode != "" {
+ args = append(args, mode)
+ }
+
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
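+
+// Illustrative only: the NX/XX/GT/LT variants map onto the EXPIRE options of the same
+// name (assumes an initialized *redis.Client rdb and a context ctx):
+//
+//	rdb.ExpireNX(ctx, "session:1", 10*time.Minute) // only set when no TTL exists
+//	rdb.ExpireGT(ctx, "session:1", 30*time.Minute) // only extend the current TTL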
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "expiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectFreq(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "freq", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(command, key string) []interface{} {
+ args := []interface{}{command, key}
+
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
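+
+// Illustrative only: the Sort struct mirrors the optional SORT arguments
+// (assumes an initialized *redis.Client rdb and a context ctx):
+//
+//	vals, err := rdb.Sort(ctx, "mylist", &redis.Sort{
+//		By:     "weight_*",
+//		Offset: 0,
+//		Count:  10,
+//		Order:  "DESC",
+//	}).Result()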
+
+func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args("sort", key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args("sort", key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+ args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+ if replace {
+ args = append(args, "REPLACE")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
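+
+// Illustrative usage sketch (an editorial addition; rdb and ctx are an assumed
+// connected *redis.Client and context): iterate all matching keys by feeding the
+// returned cursor back into Scan until it comes back as 0.
+//
+//	var cursor uint64
+//	for {
+//		keys, next, err := rdb.Scan(ctx, cursor, "user:*", 100).Result()
+//		if err != nil {
+//			return err
+//		}
+//		process(keys) // hypothetical helper
+//		if next == 0 {
+//			break
+//		}
+//		cursor = next
+//	}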
diff --git a/vendor/github.com/redis/go-redis/v9/geo_commands.go b/vendor/github.com/redis/go-redis/v9/geo_commands.go
new file mode 100644
index 0000000..f047b98
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/geo_commands.go
@@ -0,0 +1,155 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type GeoCmdable interface {
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+ GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+ GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
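+
+// Illustrative usage sketch (an editorial addition; rdb and ctx are assumed):
+// each GeoLocation contributes a longitude/latitude/member triple, so the call
+// below issues "GEOADD drivers 13.361389 38.115556 driver:1".
+//
+//	added, err := rdb.GeoAdd(ctx, "drivers", &redis.GeoLocation{
+//		Name:      "driver:1",
+//		Longitude: 13.361389,
+//		Latitude:  38.115556,
+//	}).Result()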
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+ args := make([]interface{}, 0, 13)
+ args = append(args, "geosearch", key)
+ args = geoSearchArgs(q, args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+ ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+ args := make([]interface{}, 0, 16)
+ args = append(args, "geosearch", key)
+ args = geoSearchLocationArgs(q, args)
+ cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+ args := make([]interface{}, 0, 15)
+ args = append(args, "geosearchstore", store, key)
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+ if q.StoreDist {
+ args = append(args, "storedist")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go
new file mode 100644
index 0000000..98a361b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go
@@ -0,0 +1,611 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type HashCmdable interface {
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *MapStringStringCmd
+ HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd
+ HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd
+ HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd
+ HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int) *StringSliceCmd
+ HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd
+ HStrLen(ctx context.Context, key, field string) *IntCmd
+ HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd
+ HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd
+ HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd
+ HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd
+ HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd
+ HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd
+ HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd
+ HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd
+ HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd
+ HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd
+ HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd
+ HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd
+ HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd
+}
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in following formats:
+//
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+//
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Using a struct with the "redis" tag:
+// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` }
+//
+// - HSet("myhash", MyHash{"value1", "value2"}) Note: requires redis-server >= 4.0
+//
+// A struct (or pointer to struct) is parsed field by field; only fields carrying a redis tag are written.
+// To skip a field, tag it with `redis:"-"` or simply omit the tag.
+// Only simple field types are supported:
+// string, int/uint(8,16,32,64), float(32,64), time.Time (as RFC3339Nano), time.Duration (as nanoseconds);
+// for more complex or custom types, implement the encoding.BinaryMarshaler interface.
+//
+// Note that older Redis servers (redis-server < 4.0) accept only a single field-value pair per HSET
+// (see https://redis.io/commands/hset: "Starting with Redis version 4.0.0: Accepts multiple field and value arguments.").
+// On such servers, a struct with more than one field fails with an error similar to
+// "ERR wrong number of arguments"; use HMSet as a substitute.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
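+
+// Illustrative usage sketch (an editorial addition; rdb and ctx are assumed):
+// writing a tagged struct and reading one field back.
+//
+//	type Profile struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	_, err := rdb.HSet(ctx, "user:1", Profile{Name: "Ada", Age: 36}).Result()
+//	name, err := rdb.HGet(ctx, "user:1", "name").Result() // "Ada"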
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandField requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hrandfield", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HRandFieldWithValues requires redis-server version >= 6.2.0.
+func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd {
+ cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HStrLen(ctx context.Context, key, field string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hstrlen", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ args = append(args, "novalues")
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type HExpireArgs struct {
+ NX bool
+ XX bool
+ GT bool
+ LT bool
+}
+
+// HExpire - Sets the expiration time for specified fields in a hash in seconds.
+// The command constructs an argument list starting with "HEXPIRE", followed by the key, the duration in seconds, and the specified fields.
+// Available since Redis 7.4 CE.
+// For more information refer to [HEXPIRE Documentation].
+//
+// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/
+func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HExpireWithArgs - Sets the expiration time for specified fields in a hash in seconds.
+// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields.
+// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields.
+// Available since Redis 7.4 CE.
+// For more information refer to [HEXPIRE Documentation].
+//
+// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/
+func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)}
+
+ // only if one argument is true, we can add it to the args
+ // if more than one argument is true, it will cause an error
+ if expirationArgs.NX {
+ args = append(args, "NX")
+ } else if expirationArgs.XX {
+ args = append(args, "XX")
+ } else if expirationArgs.GT {
+ args = append(args, "GT")
+ } else if expirationArgs.LT {
+ args = append(args, "LT")
+ }
+
+ args = append(args, "FIELDS", len(fields))
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
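+
+// Illustrative usage sketch (an editorial addition; requires a server with
+// HEXPIRE support, i.e. Redis 7.4+; rdb and ctx are assumed): set a 10-minute
+// TTL on two hash fields only if they currently have none, producing
+// "HEXPIRE user:1 600 NX FIELDS 2 name age".
+//
+//	res, err := rdb.HExpireWithArgs(ctx, "user:1", 10*time.Minute,
+//		redis.HExpireArgs{NX: true}, "name", "age").Result()
+//	// res holds one status code per field (1 if the TTL was set,
+//	// 0 if the NX condition was not met).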
+
+// HPExpire - Sets the expiration time for specified fields in a hash in milliseconds.
+// Similar to HExpire, it accepts a key, an expiration duration, and a list of fields.
+// The duration is converted from the standard time.Duration to milliseconds for the Redis command.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPEXPIRE Documentation].
+//
+// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/
+func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HPExpireWithArgs - Sets the expiration time for specified fields in a hash in milliseconds.
+// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields.
+// The command constructs an argument list starting with "HPEXPIRE", followed by the key, duration, any conditional flags, and the specified fields.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPEXPIRE Documentation].
+//
+// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/
+func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)}
+
+ // only if one argument is true, we can add it to the args
+ // if more than one argument is true, it will cause an error
+ if expirationArgs.NX {
+ args = append(args, "NX")
+ } else if expirationArgs.XX {
+ args = append(args, "XX")
+ } else if expirationArgs.GT {
+ args = append(args, "GT")
+ } else if expirationArgs.LT {
+ args = append(args, "LT")
+ }
+
+ args = append(args, "FIELDS", len(fields))
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds.
+// Takes a key, an absolute time.Time (sent as a UNIX timestamp in seconds), and a list of fields.
+// The command sets absolute expiration times based on the UNIX timestamp provided.
+// Available since Redis 7.4 CE.
+// For more information refer to [HExpireAt Documentation].
+//
+// [HExpireAt Documentation]: https://redis.io/commands/hexpireat/
+func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HEXPIREAT", key, tm.Unix()}
+
+ // only if one argument is true, we can add it to the args
+ // if more than one argument is true, it will cause an error
+ if expirationArgs.NX {
+ args = append(args, "NX")
+ } else if expirationArgs.XX {
+ args = append(args, "XX")
+ } else if expirationArgs.GT {
+ args = append(args, "GT")
+ } else if expirationArgs.LT {
+ args = append(args, "LT")
+ }
+
+ args = append(args, "FIELDS", len(fields))
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds.
+// Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireAt Documentation].
+//
+// [HPExpireAt Documentation]: https://redis.io/commands/hpexpireat/
+func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond)}
+
+ // only if one argument is true, we can add it to the args
+ // if more than one argument is true, it will cause an error
+ if expirationArgs.NX {
+ args = append(args, "NX")
+ } else if expirationArgs.XX {
+ args = append(args, "XX")
+ } else if expirationArgs.GT {
+ args = append(args, "GT")
+ } else if expirationArgs.LT {
+ args = append(args, "LT")
+ }
+
+ args = append(args, "FIELDS", len(fields))
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HPersist - Removes the expiration time from specified fields in a hash.
+// Accepts a key and the fields themselves.
+// This command ensures that each field specified will have its expiration removed if present.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPersist Documentation].
+//
+// [HPersist Documentation]: https://redis.io/commands/hpersist/
+func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds.
+// Requires a key and the fields themselves to fetch their expiration timestamps.
+// This command returns the expiration times for each field or error/status codes for each field as specified.
+// Available since Redis 7.4 CE.
+// For more information refer to [HExpireTime Documentation].
+//
+// [HExpireTime Documentation]: https://redis.io/commands/hexpiretime/
+func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds.
+// Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters.
+// Provides the expiration timestamp for each field in milliseconds.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireTime Documentation].
+//
+// [HPExpireTime Documentation]: https://redis.io/commands/hpexpiretime/
+func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds.
+// Requires a key and the fields themselves. It returns the TTL for each specified field.
+// This command fetches the TTL in seconds for each field or returns error/status codes as appropriate.
+// Available since Redis 7.4 CE.
+// For more information refer to [HTTL Documentation].
+//
+// [HTTL Documentation]: https://redis.io/commands/httl/
+func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HTTL", key, "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds.
+// Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields.
+// This command provides the TTL in milliseconds for each field or returns error/status codes as needed.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPTTL Documentation].
+//
+// [HPTTL Documentation]: https://redis.io/commands/hpttl/
+func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd {
+ args := []interface{}{"HPTTL", key, "FIELDS", len(fields)}
+
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd {
+ args := []interface{}{"HGETDEL", key, "FIELDS", len(fields)}
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd {
+ args := []interface{}{"HGETEX", key, "FIELDS", len(fields)}
+ for _, field := range fields {
+ args = append(args, field)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HGetEXExpirationType represents an expiration option for the HGETEX command.
+type HGetEXExpirationType string
+
+const (
+ HGetEXExpirationEX HGetEXExpirationType = "EX"
+ HGetEXExpirationPX HGetEXExpirationType = "PX"
+ HGetEXExpirationEXAT HGetEXExpirationType = "EXAT"
+ HGetEXExpirationPXAT HGetEXExpirationType = "PXAT"
+ HGetEXExpirationPERSIST HGetEXExpirationType = "PERSIST"
+)
+
+type HGetEXOptions struct {
+ ExpirationType HGetEXExpirationType
+ ExpirationVal int64
+}
+
+func (c cmdable) HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd {
+ args := []interface{}{"HGETEX", key}
+ if options.ExpirationType != "" {
+ args = append(args, string(options.ExpirationType))
+ if options.ExpirationType != HGetEXExpirationPERSIST {
+ args = append(args, options.ExpirationVal)
+ }
+ }
+
+ args = append(args, "FIELDS", len(fields))
+ for _, field := range fields {
+ args = append(args, field)
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
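+
+// Illustrative usage sketch (an editorial addition; requires a server that
+// supports HGETEX; rdb and ctx are assumed): fetch two fields and set a
+// 60-second TTL on them in the same call, i.e. "HGETEX user:1 EX 60 FIELDS 2 name age".
+//
+//	vals, err := rdb.HGetEXWithArgs(ctx, "user:1", &redis.HGetEXOptions{
+//		ExpirationType: redis.HGetEXExpirationEX,
+//		ExpirationVal:  60,
+//	}, "name", "age").Result()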
+
+type HSetEXCondition string
+
+const (
+ HSetEXFNX HSetEXCondition = "FNX" // Only set the fields if none of them already exist.
+ HSetEXFXX HSetEXCondition = "FXX" // Only set the fields if all already exist.
+)
+
+type HSetEXExpirationType string
+
+const (
+ HSetEXExpirationEX HSetEXExpirationType = "EX"
+ HSetEXExpirationPX HSetEXExpirationType = "PX"
+ HSetEXExpirationEXAT HSetEXExpirationType = "EXAT"
+ HSetEXExpirationPXAT HSetEXExpirationType = "PXAT"
+ HSetEXExpirationKEEPTTL HSetEXExpirationType = "KEEPTTL"
+)
+
+type HSetEXOptions struct {
+ Condition HSetEXCondition
+ ExpirationType HSetEXExpirationType
+ ExpirationVal int64
+}
+
+func (c cmdable) HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd {
+ args := []interface{}{"HSETEX", key, "FIELDS", len(fieldsAndValues) / 2}
+ for _, field := range fieldsAndValues {
+ args = append(args, field)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd {
+ args := []interface{}{"HSETEX", key}
+ if options.Condition != "" {
+ args = append(args, string(options.Condition))
+ }
+ if options.ExpirationType != "" {
+ args = append(args, string(options.ExpirationType))
+ if options.ExpirationType != HSetEXExpirationKEEPTTL {
+ args = append(args, options.ExpirationVal)
+ }
+ }
+ args = append(args, "FIELDS", len(fieldsAndValues)/2)
+ for _, field := range fieldsAndValues {
+ args = append(args, field)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/hyperloglog_commands.go b/vendor/github.com/redis/go-redis/v9/hyperloglog_commands.go
new file mode 100644
index 0000000..5a608fa
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/hyperloglog_commands.go
@@ -0,0 +1,42 @@
+package redis
+
+import "context"
+
+type HyperLogLogCmdable interface {
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+}
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/arg.go b/vendor/github.com/redis/go-redis/v9/internal/arg.go
new file mode 100644
index 0000000..2e5ca33
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/arg.go
@@ -0,0 +1,58 @@
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+ switch v := v.(type) {
+ case nil:
+ return append(b, ""...)
+ case string:
+ return appendUTF8String(b, util.StringToBytes(v))
+ case []byte:
+ return appendUTF8String(b, v)
+ case int:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int8:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int16:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int32:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int64:
+ return strconv.AppendInt(b, v, 10)
+ case uint:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint8:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint16:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint32:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint64:
+ return strconv.AppendUint(b, v, 10)
+ case float32:
+ return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+ case float64:
+ return strconv.AppendFloat(b, v, 'f', -1, 64)
+ case bool:
+ if v {
+ return append(b, "true"...)
+ }
+ return append(b, "false"...)
+ case time.Time:
+ return v.AppendFormat(b, time.RFC3339Nano)
+ default:
+ return append(b, fmt.Sprint(v)...)
+ }
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+ dst = append(dst, src...)
+ return dst
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go
new file mode 100644
index 0000000..f13ee81
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/hashtag/hashtag.go
@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+ "strings"
+
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+func RandomSlot() int {
+ return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+ if key == "" {
+ return RandomSlot()
+ }
+ key = Key(key)
+ return int(crc16sum(key)) % slotNumber
+}
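+
+// Illustrative sketch (an editorial addition): keys that share a hash tag hash to
+// the same slot, so a cluster client can serve multi-key operations on them from
+// a single node.
+//
+//	s1 := Slot("{user1000}.following") // only the tag "user1000" is hashed
+//	s2 := Slot("{user1000}.followers")
+//	// s1 == s2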
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/hscan/hscan.go b/vendor/github.com/redis/go-redis/v9/internal/hscan/hscan.go
new file mode 100644
index 0000000..203ec4a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/hscan/hscan.go
@@ -0,0 +1,207 @@
+package hscan
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+// Scanner is the interface implemented by types that can decode a Redis value
+// into themselves, overriding the default decoderFunc behavior.
+type Scanner interface {
+ ScanRedis(s string) error
+}
+
+var (
+	// List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+ decoders = []decoderFunc{
+ reflect.Bool: decodeBool,
+ reflect.Int: decodeInt,
+ reflect.Int8: decodeInt8,
+ reflect.Int16: decodeInt16,
+ reflect.Int32: decodeInt32,
+ reflect.Int64: decodeInt64,
+ reflect.Uint: decodeUint,
+ reflect.Uint8: decodeUint8,
+ reflect.Uint16: decodeUint16,
+ reflect.Uint32: decodeUint32,
+ reflect.Uint64: decodeUint64,
+ reflect.Float32: decodeFloat32,
+ reflect.Float64: decodeFloat64,
+ reflect.Complex64: decodeUnsupported,
+ reflect.Complex128: decodeUnsupported,
+ reflect.Array: decodeUnsupported,
+ reflect.Chan: decodeUnsupported,
+ reflect.Func: decodeUnsupported,
+ reflect.Interface: decodeUnsupported,
+ reflect.Map: decodeUnsupported,
+ reflect.Ptr: decodeUnsupported,
+ reflect.Slice: decodeSlice,
+ reflect.String: decodeString,
+ reflect.Struct: decodeUnsupported,
+ reflect.UnsafePointer: decodeUnsupported,
+ }
+
+ // Global map of struct field specs that is populated once for every new
+ // struct type that is scanned. This caches the field types and the corresponding
+ // decoder functions to avoid iterating through struct fields on subsequent scans.
+ globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+ v := reflect.ValueOf(dst)
+
+ // The destination to scan into should be a struct pointer.
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+ }
+
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+ }
+
+ return StructValue{
+ spec: globalStructMap.get(v.Type()),
+ value: v,
+ }, nil
+}
+
+// Scan scans the results from a key-value Redis map result set into a destination struct.
+// The Redis keys are matched to the struct's fields by their `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+ if len(keys) != len(vals) {
+ return errors.New("args should have the same number of keys and vals")
+ }
+
+ strct, err := Struct(dst)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the (key, value) sequence.
+ for i := 0; i < len(vals); i++ {
+ key, ok := keys[i].(string)
+ if !ok {
+ continue
+ }
+
+ val, ok := vals[i].(string)
+ if !ok {
+ continue
+ }
+
+ if err := strct.Scan(key, val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
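+
+// Illustrative sketch (an editorial addition): scanning an HGETALL-style
+// key/value sequence into a tagged struct; keys without a matching tag are
+// silently skipped.
+//
+//	type user struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	var u user
+//	err := Scan(&u, []interface{}{"name", "age"}, []interface{}{"Ada", "36"})
+//	// u.Name == "Ada", u.Age == 36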
+
+func decodeBool(f reflect.Value, s string) error {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return err
+ }
+ f.SetBool(b)
+ return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseInt(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetInt(v)
+ return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseUint(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetUint(v)
+ return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+// ParseFloat defaults to 64-bit precision, but we state the bit size explicitly for clarity.
+func decodeFloat64(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+ f.SetString(s)
+ return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+ // []byte slice ([]uint8).
+ if f.Type().Elem().Kind() == reflect.Uint8 {
+ f.SetBytes([]byte(s))
+ }
+ return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+ return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/hscan/structmap.go b/vendor/github.com/redis/go-redis/v9/internal/hscan/structmap.go
new file mode 100644
index 0000000..1a560e4
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/hscan/structmap.go
@@ -0,0 +1,125 @@
+package hscan
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+ m sync.Map
+}
+
+func newStructMap() *structMap {
+ return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+ if v, ok := s.m.Load(t); ok {
+ return v.(*structSpec)
+ }
+
+ spec := newStructSpec(t, "redis")
+ s.m.Store(t, spec)
+ return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+ m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+ s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+ numField := t.NumField()
+ out := &structSpec{
+ m: make(map[string]*structField, numField),
+ }
+
+ for i := 0; i < numField; i++ {
+ f := t.Field(i)
+
+ tag := f.Tag.Get(fieldTag)
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ tag = strings.Split(tag, ",")[0]
+ if tag == "" {
+ continue
+ }
+
+ // Use the built-in decoder.
+ kind := f.Type.Kind()
+ if kind == reflect.Pointer {
+ kind = f.Type.Elem().Kind()
+ }
+ out.set(tag, &structField{index: i, fn: decoders[kind]})
+ }
+
+ return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+ index int
+ fn decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+ spec *structSpec
+ value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+ field, ok := s.spec.m[key]
+ if !ok {
+ return nil
+ }
+
+ v := s.value.Field(field.index)
+ isPtr := v.Kind() == reflect.Ptr
+
+ if isPtr && v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if !isPtr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ isPtr = true
+ }
+
+ if isPtr && v.Type().NumMethod() > 0 && v.CanInterface() {
+ switch scan := v.Interface().(type) {
+ case Scanner:
+ return scan.ScanRedis(value)
+ case encoding.TextUnmarshaler:
+ return scan.UnmarshalText(util.StringToBytes(value))
+ }
+ }
+
+ if isPtr {
+ v = v.Elem()
+ }
+
+ if err := field.fn(v, value); err != nil {
+ t := s.value.Type()
+ return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+ value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+ }
+ return nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/internal.go b/vendor/github.com/redis/go-redis/v9/internal/internal.go
new file mode 100644
index 0000000..e783d13
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ panic("not reached")
+ }
+ if minBackoff == 0 {
+ return 0
+ }
+
+ d := minBackoff << uint(retry)
+ if d < minBackoff {
+ return maxBackoff
+ }
+
+ d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+ if d > maxBackoff || d < minBackoff {
+ d = maxBackoff
+ }
+
+ return d
+}
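+
+// Worked example (an editorial addition): with minBackoff = 8ms and
+// maxBackoff = 512ms, retry 0 yields a value in [8ms, 16ms), retry 3 a value in
+// [8ms, 72ms), and once the shifted bound overflows or exceeds maxBackoff the
+// result is capped at 512ms.
+//
+//	d := RetryBackoff(3, 8*time.Millisecond, 512*time.Millisecond)
+//	// 8ms <= d < 72ms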
diff --git a/vendor/github.com/redis/go-redis/v9/internal/log.go b/vendor/github.com/redis/go-redis/v9/internal/log.go
new file mode 100644
index 0000000..c8b9213
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/log.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+)
+
+type Logging interface {
+ Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+ log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+ _ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Print.
+var Logger Logging = &logger{
+ log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/once.go b/vendor/github.com/redis/go-redis/v9/internal/once.go
new file mode 100644
index 0000000..b81244f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/once.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+//
+// var once Once
+//
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+//
+// err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go
new file mode 100644
index 0000000..c1087b4
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go
@@ -0,0 +1,137 @@
+package pool
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ usedAt int64 // atomic
+ netConn net.Conn
+
+ rd *proto.Reader
+ bw *bufio.Writer
+ wr *proto.Writer
+
+ Inited bool
+ pooled bool
+ createdAt time.Time
+
+ onClose func() error
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ createdAt: time.Now(),
+ }
+ cn.rd = proto.NewReader(netConn)
+ cn.bw = bufio.NewWriter(netConn)
+ cn.wr = proto.NewWriter(cn.bw)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ unix := atomic.LoadInt64(&cn.usedAt)
+ return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetOnClose(fn func() error) {
+ cn.onClose = fn
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.rd.Reset(netConn)
+ cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ if cn.netConn != nil {
+ return cn.netConn.RemoteAddr()
+ }
+ return nil
+}
+
+func (cn *Conn) WithReader(
+ ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error,
+) error {
+ if timeout >= 0 {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
+ }
+ return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+ ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+ if timeout >= 0 {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
+ }
+
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
+
+ if err := fn(cn.wr); err != nil {
+ return err
+ }
+
+ return cn.bw.Flush()
+}
+
+func (cn *Conn) Close() error {
+ if cn.onClose != nil {
+ // ignore error
+ _ = cn.onClose()
+ }
+ return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+ tm := time.Now()
+ cn.SetUsedAt(tm)
+
+ if timeout > 0 {
+ tm = tm.Add(timeout)
+ }
+
+ if ctx != nil {
+ deadline, ok := ctx.Deadline()
+ if ok {
+ if timeout == 0 {
+ return deadline
+ }
+ if deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go
new file mode 100644
index 0000000..83190d3
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go
@@ -0,0 +1,49 @@
+//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
+
+package pool
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+ "time"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ // Reset previous timeout.
+ _ = conn.SetDeadline(time.Time{})
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ var sysErr error
+
+ if err := rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ }); err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go
new file mode 100644
index 0000000..295da12
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go
@@ -0,0 +1,9 @@
+//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
+
+package pool
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go
new file mode 100644
index 0000000..e7d951e
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go
@@ -0,0 +1,534 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+)
+
+var (
+	// ErrClosed is returned when any operation is performed on a closed client.
+ ErrClosed = errors.New("redis: client is closed")
+
+ // ErrPoolExhausted is returned from a pool connection method
+ // when the maximum number of database connections in the pool has been reached.
+ ErrPoolExhausted = errors.New("redis: connection pool exhausted")
+
+	// ErrPoolTimeout is returned when the pool times out waiting for a free connection.
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+	WaitCount      uint32 // number of times a connection had to be waited for
+	WaitDurationNs int64  // total time spent waiting for a connection, in nanoseconds
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+
+ PoolFIFO bool
+ PoolSize int
+ DialTimeout time.Duration
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+}
+
+type lastDialErrorWrap struct {
+ err error
+}
+
+type ConnPool struct {
+ cfg *Options
+
+ dialErrorsNum uint32 // atomic
+ lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+ waitDurationNs atomic.Int64
+
+ _closed uint32 // atomic
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ cfg: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.cfg.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.cfg.PoolSize && p.idleConnsLen < p.cfg.MinIdleConns {
+ select {
+ case p.queue <- struct{}{}:
+ p.poolSize++
+ p.idleConnsLen++
+
+ go func() {
+ err := p.addIdleConn()
+ if err != nil && err != ErrClosed {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+
+ p.freeTurn()
+ }()
+ default:
+ return
+ }
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout)
+ defer cancel()
+
+ cn, err := p.dialConn(ctx, true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
+ if p.closed() {
+ _ = cn.Close()
+ return ErrClosed
+ }
+
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ p.connsMu.Lock()
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
+ p.connsMu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+ p.connsMu.Unlock()
+
+ cn, err := p.dialConn(ctx, pooled)
+ if err != nil {
+ return nil, err
+ }
+
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ if p.cfg.MaxActiveConns > 0 && p.poolSize >= p.cfg.MaxActiveConns {
+ _ = cn.Close()
+ return nil, ErrPoolExhausted
+ }
+
+ p.conns = append(p.conns, cn)
+ if pooled {
+ // If pool is full remove the cn on next Put.
+ if p.poolSize >= p.cfg.PoolSize {
+ cn.pooled = false
+ } else {
+ p.poolSize++
+ }
+ }
+
+ return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.cfg.PoolSize) {
+ return nil, p.getLastDialError()
+ }
+
+ netConn, err := p.cfg.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.cfg.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ if p.closed() {
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout)
+
+ conn, err := p.cfg.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ cancel()
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ cancel()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+ err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+ if err != nil {
+ return err.err
+ }
+ return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if err := p.waitTurn(ctx); err != nil {
+ return nil, err
+ }
+
+ for {
+ p.connsMu.Lock()
+ cn, err := p.popIdle()
+ p.connsMu.Unlock()
+
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ if cn == nil {
+ break
+ }
+
+ if !p.isHealthyConn(cn) {
+ _ = p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, nil
+ }
+
+ atomic.AddUint32(&p.stats.Misses, 1)
+
+ newcn, err := p.newConn(ctx, true)
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ return newcn, nil
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ select {
+ case p.queue <- struct{}{}:
+ return nil
+ default:
+ }
+
+ start := time.Now()
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.cfg.PoolTimeout)
+
+ select {
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return ctx.Err()
+ case p.queue <- struct{}{}:
+ p.waitDurationNs.Add(time.Since(start).Nanoseconds())
+ atomic.AddUint32(&p.stats.WaitCount, 1)
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return nil
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return ErrPoolTimeout
+ }
+}
+
+func (p *ConnPool) freeTurn() {
+ <-p.queue
+}
+
+func (p *ConnPool) popIdle() (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+ n := len(p.idleConns)
+ if n == 0 {
+ return nil, nil
+ }
+
+ var cn *Conn
+ if p.cfg.PoolFIFO {
+ cn = p.idleConns[0]
+ copy(p.idleConns, p.idleConns[1:])
+ p.idleConns = p.idleConns[:n-1]
+ } else {
+ idx := n - 1
+ cn = p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ }
+ p.idleConnsLen--
+ p.checkMinIdleConns()
+ return cn, nil
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+ if cn.rd.Buffered() > 0 {
+ internal.Logger.Printf(ctx, "Conn has unread data")
+ p.Remove(ctx, cn, BadConnError{})
+ return
+ }
+
+ if !cn.pooled {
+ p.Remove(ctx, cn, nil)
+ return
+ }
+
+ var shouldCloseConn bool
+
+ p.connsMu.Lock()
+
+ if p.cfg.MaxIdleConns == 0 || p.idleConnsLen < p.cfg.MaxIdleConns {
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ } else {
+ p.removeConn(cn)
+ shouldCloseConn = true
+ }
+
+ p.connsMu.Unlock()
+
+ p.freeTurn()
+
+ if shouldCloseConn {
+ _ = p.closeConn(cn)
+ }
+}
+
+func (p *ConnPool) Remove(_ context.Context, cn *Conn, reason error) {
+ p.removeConnWithLock(cn)
+ p.freeTurn()
+ _ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.removeConnWithLock(cn)
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+ p.removeConn(cn)
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
+ break
+ }
+ }
+ atomic.AddUint32(&p.stats.StaleConns, 1)
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ n := len(p.conns)
+ p.connsMu.Unlock()
+ return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+ return &Stats{
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+ WaitCount: atomic.LoadUint32(&p.stats.WaitCount),
+ WaitDurationNs: p.waitDurationNs.Load(),
+
+ TotalConns: uint32(p.Len()),
+ IdleConns: uint32(p.IdleLen()),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ var firstErr error
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.poolSize = 0
+ p.idleConns = nil
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) isHealthyConn(cn *Conn) bool {
+ now := time.Now()
+
+ if p.cfg.ConnMaxLifetime > 0 && now.Sub(cn.createdAt) >= p.cfg.ConnMaxLifetime {
+ return false
+ }
+ if p.cfg.ConnMaxIdleTime > 0 && now.Sub(cn.UsedAt()) >= p.cfg.ConnMaxIdleTime {
+ return false
+ }
+
+ if connCheck(cn.netConn) != nil {
+ return false
+ }
+
+ cn.SetUsedAt(now)
+ return true
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go
new file mode 100644
index 0000000..5a3fde1
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+ pool Pooler
+ cn *Conn
+ stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ pool: pool,
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.stickyErr != nil {
+ return nil, p.stickyErr
+ }
+ return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.cn = nil
+ p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+ p.cn = nil
+ p.stickyErr = ErrClosed
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go
new file mode 100644
index 0000000..3adb99b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool_sticky.go
@@ -0,0 +1,201 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+)
+
+const (
+ stateDefault = 0
+ stateInited = 1
+ stateClosed = 2
+)
+
+type BadConnError struct {
+ wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+ s := "redis: Conn is in a bad state"
+ if e.wrapped != nil {
+ s += ": " + e.wrapped.Error()
+ }
+ return s
+}
+
+func (e BadConnError) Unwrap() error {
+ return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+ pool Pooler
+ shared int32 // atomic
+
+ state uint32 // atomic
+ ch chan *Conn
+
+ _badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+ p, ok := pool.(*StickyConnPool)
+ if !ok {
+ p = &StickyConnPool{
+ pool: pool,
+ ch: make(chan *Conn, 1),
+ }
+ }
+ atomic.AddInt32(&p.shared, 1)
+ return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+ // In worst case this races with Close which is not a very common operation.
+ for i := 0; i < 1000; i++ {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ return cn, nil
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ case stateInited:
+ if err := p.badConnError(); err != nil {
+ return nil, err
+ }
+ cn, ok := <-p.ch
+ if !ok {
+ return nil, ErrClosed
+ }
+ return cn, nil
+ case stateClosed:
+ return nil, ErrClosed
+ default:
+ panic("not reached")
+ }
+ }
+ return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+ defer func() {
+ if recover() != nil {
+ p.freeConn(ctx, cn)
+ }
+ }()
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+ if err := p.badConnError(); err != nil {
+ p.pool.Remove(ctx, cn, err)
+ } else {
+ p.pool.Put(ctx, cn)
+ }
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ defer func() {
+ if recover() != nil {
+ p.pool.Remove(ctx, cn, ErrClosed)
+ }
+ }()
+ p._badConnError.Store(BadConnError{wrapped: reason})
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+ if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+ return nil
+ }
+
+ for i := 0; i < 1000; i++ {
+ state := atomic.LoadUint32(&p.state)
+ if state == stateClosed {
+ return ErrClosed
+ }
+ if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+ close(p.ch)
+ cn, ok := <-p.ch
+ if ok {
+ p.freeConn(context.TODO(), cn)
+ }
+ return nil
+ }
+ }
+
+ return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+ if p.badConnError() == nil {
+ return nil
+ }
+
+ select {
+ case cn, ok := <-p.ch:
+ if !ok {
+ return ErrClosed
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ p._badConnError.Store(BadConnError{wrapped: nil})
+ default:
+ return errors.New("redis: StickyConnPool does not have a Conn")
+ }
+
+ if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+ state := atomic.LoadUint32(&p.state)
+ return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+ }
+
+ return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+ if v := p._badConnError.Load(); v != nil {
+ if err := v.(BadConnError); err.wrapped != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *StickyConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go
new file mode 100644
index 0000000..8d23817
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/proto/reader.go
@@ -0,0 +1,552 @@
+package proto
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// redis resp protocol data type.
+const (
+ RespStatus = '+' // +\r\n
+ RespError = '-' // -\r\n
+ RespString = '$' // $\r\n\r\n
+ RespInt = ':' // :\r\n
+ RespNil = '_' // _\r\n
+ RespFloat = ',' // ,\r\n (golang float)
+ RespBool = '#' // true: #t\r\n false: #f\r\n
+ RespBlobError = '!' // !\r\n\r\n
+ RespVerbatim = '=' // =\r\nFORMAT:\r\n
+ RespBigInt = '(' // (\r\n
+ RespArray = '*' // *\r\n... (same as resp2)
+ RespMap = '%' // %\r\n(key)\r\n(value)\r\n... (golang map)
+ RespSet = '~' // ~\r\n... (same as Array)
+ RespAttr = '|' // |\r\n(key)\r\n(value)\r\n... + command reply
+ RespPush = '>' // >\r\n... (same as Array)
+)
+
+// Not used for now.
+// Redis does not yet use these two data types; support will be added later.
+// Streamed = "EOF:"
+// StreamedAggregated = '?'
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(line[1:])
+}
+
+//------------------------------------------------------------------------------
+
+type Reader struct {
+ rd *bufio.Reader
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+// PeekReplyType returns the data type of the next response without advancing the Reader,
+// and discards the attribute type.
+func (r *Reader) PeekReplyType() (byte, error) {
+ b, err := r.rd.Peek(1)
+ if err != nil {
+ return 0, err
+ }
+ if b[0] == RespAttr {
+ if err = r.DiscardNext(); err != nil {
+ return 0, err
+ }
+ return r.PeekReplyType()
+ }
+ return b[0], nil
+}
+
+// ReadLine returns a valid reply, checking for protocol and Redis errors,
+// and discards the attribute type.
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case RespError:
+ return nil, ParseErrorReply(line)
+ case RespNil:
+ return nil, Nil
+ case RespBlobError:
+ var blobErr string
+ blobErr, err = r.readStringReply(line)
+ if err == nil {
+ err = RedisError(blobErr)
+ }
+ return nil, err
+ case RespAttr:
+ if err = r.Discard(line); err != nil {
+ return nil, err
+ }
+ return r.ReadLine()
+ }
+
+ // Compatible with RESP2
+ if IsNilReply(line) {
+ return nil, Nil
+ }
+
+ return line, nil
+}
+
+// readLine returns an error if:
+// - there is a pending read error;
+// - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...) //nolint:makezero
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply() (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case RespStatus:
+ return string(line[1:]), nil
+ case RespInt:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespFloat:
+ return r.readFloat(line)
+ case RespBool:
+ return r.readBool(line)
+ case RespBigInt:
+ return r.readBigInt(line)
+
+ case RespString:
+ return r.readStringReply(line)
+ case RespVerbatim:
+ return r.readVerb(line)
+
+ case RespArray, RespSet, RespPush:
+ return r.readSlice(line)
+ case RespMap:
+ return r.readMap(line)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) readFloat(line []byte) (float64, error) {
+ v := string(line[1:])
+ switch string(line[1:]) {
+ case "inf":
+ return math.Inf(1), nil
+ case "-inf":
+ return math.Inf(-1), nil
+ case "nan", "-nan":
+ return math.NaN(), nil
+ }
+ return strconv.ParseFloat(v, 64)
+}
+
+func (r *Reader) readBool(line []byte) (bool, error) {
+ switch string(line[1:]) {
+ case "t":
+ return true, nil
+ case "f":
+ return false, nil
+ }
+ return false, fmt.Errorf("redis: can't parse bool reply: %q", line)
+}
+
+func (r *Reader) readBigInt(line []byte) (*big.Int, error) {
+ i := new(big.Int)
+ if i, ok := i.SetString(string(line[1:]), 10); ok {
+ return i, nil
+ }
+ return nil, fmt.Errorf("redis: can't parse bigInt reply: %q", line)
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, n+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:n]), nil
+}
+
+func (r *Reader) readVerb(line []byte) (string, error) {
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return "", err
+ }
+ if len(s) < 4 || s[3] != ':' {
+ return "", fmt.Errorf("redis: can't parse verbatim string reply: %q", line)
+ }
+ return s[4:], nil
+}
+
+func (r *Reader) readSlice(line []byte) ([]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+
+ val := make([]interface{}, n)
+ for i := 0; i < len(val); i++ {
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ val[i] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ val[i] = err
+ continue
+ }
+ return nil, err
+ }
+ val[i] = v
+ }
+ return val, nil
+}
+
+func (r *Reader) readMap(line []byte) (map[interface{}]interface{}, error) {
+ n, err := replyLen(line)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[interface{}]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := r.ReadReply()
+ if err != nil {
+ return nil, err
+ }
+ v, err := r.ReadReply()
+ if err != nil {
+ if err == Nil {
+ m[k] = nil
+ continue
+ }
+ if err, ok := err.(RedisError); ok {
+ m[k] = err
+ continue
+ }
+ return nil, err
+ }
+ m[k] = v
+ }
+ return m, nil
+}
+
+// -------------------------------
+
+func (r *Reader) ReadInt() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseInt(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsInt64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Int64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespInt, RespStatus:
+ return util.ParseUint(line[1:], 10, 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint([]byte(s), 10, 64)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return 0, err
+ }
+ if !b.IsUint64() {
+ return 0, fmt.Errorf("bigInt(%s) value out of range", b.String())
+ }
+ return b.Uint64(), nil
+ }
+ return 0, fmt.Errorf("redis: can't parse uint reply: %.100q", line)
+}
+
+func (r *Reader) ReadFloat() (float64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespFloat:
+ return r.readFloat(line)
+ case RespStatus:
+ return strconv.ParseFloat(string(line[1:]), 64)
+ case RespString:
+ s, err := r.readStringReply(line)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseFloat(s, 64)
+ }
+ return 0, fmt.Errorf("redis: can't parse float reply: %.100q", line)
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+
+ switch line[0] {
+ case RespStatus, RespInt, RespFloat:
+ return string(line[1:]), nil
+ case RespString:
+ return r.readStringReply(line)
+ case RespBool:
+ b, err := r.readBool(line)
+ return strconv.FormatBool(b), err
+ case RespVerbatim:
+ return r.readVerb(line)
+ case RespBigInt:
+ b, err := r.readBigInt(line)
+ if err != nil {
+ return "", err
+ }
+ return b.String(), nil
+ }
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+}
+
+func (r *Reader) ReadBool() (bool, error) {
+ s, err := r.ReadString()
+ if err != nil {
+ return false, err
+ }
+ return s == "OK" || s == "1" || s == "true", nil
+}
+
+func (r *Reader) ReadSlice() ([]interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ return r.readSlice(line)
+}
+
+// ReadFixedArrayLen reads the array length and verifies that it equals fixedLen.
+func (r *Reader) ReadFixedArrayLen(fixedLen int) error {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the array, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadArrayLen reads and returns the length of the array.
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespArray, RespSet, RespPush:
+ return replyLen(line)
+ default:
+ return 0, fmt.Errorf("redis: can't parse array/set/push reply: %.100q", line)
+ }
+}
+
+// ReadFixedMapLen reads the map length and verifies that it equals fixedLen.
+func (r *Reader) ReadFixedMapLen(fixedLen int) error {
+ n, err := r.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ if n != fixedLen {
+ return fmt.Errorf("redis: got %d elements in the map, wanted %d", n, fixedLen)
+ }
+ return nil
+}
+
+// ReadMapLen reads the length of a map reply.
+// If the reply is an array type (RespArray/RespSet/RespPush),
+// its length must be a multiple of 2 and n/2 is returned.
+// Other types return an error.
+func (r *Reader) ReadMapLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case RespMap:
+ return replyLen(line)
+ case RespArray, RespSet, RespPush:
+ // Some commands and RESP2 protocol may respond to array types.
+ n, err := replyLen(line)
+ if err != nil {
+ return 0, err
+ }
+ if n%2 != 0 {
+ return 0, fmt.Errorf("redis: the length of the array must be a multiple of 2, got: %d", n)
+ }
+ return n / 2, nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse map reply: %.100q", line)
+ }
+}
+
+// DiscardNext reads and discards the data represented by the next line.
+func (r *Reader) DiscardNext() error {
+ line, err := r.readLine()
+ if err != nil {
+ return err
+ }
+ return r.Discard(line)
+}
+
+// Discard discards the data represented by line.
+func (r *Reader) Discard(line []byte) (err error) {
+ if len(line) == 0 {
+ return errors.New("redis: invalid line")
+ }
+ switch line[0] {
+ case RespStatus, RespError, RespInt, RespNil, RespFloat, RespBool, RespBigInt:
+ return nil
+ }
+
+ n, err := replyLen(line)
+ if err != nil && err != Nil {
+ return err
+ }
+
+ switch line[0] {
+ case RespBlobError, RespString, RespVerbatim:
+ // +\r\n
+ _, err = r.rd.Discard(n + 2)
+ return err
+ case RespArray, RespSet, RespPush:
+ for i := 0; i < n; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ case RespMap, RespAttr:
+ // Read key & value.
+ for i := 0; i < n*2; i++ {
+ if err = r.DiscardNext(); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ return fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func replyLen(line []byte) (n int, err error) {
+ n, err = util.Atoi(line[1:])
+ if err != nil {
+ return 0, err
+ }
+
+ if n < -1 {
+ return 0, fmt.Errorf("redis: invalid reply: %q", line)
+ }
+
+ switch line[0] {
+ case RespString, RespVerbatim, RespBlobError,
+ RespArray, RespSet, RespPush, RespMap, RespAttr:
+ if n == -1 {
+ return 0, Nil
+ }
+ }
+ return n, nil
+}
+
+// IsNilReply detects redis.Nil of RESP2.
+func IsNilReply(line []byte) bool {
+ return len(line) == 3 &&
+ (line[0] == RespString || line[0] == RespArray) &&
+ line[1] == '-' && line[2] == '1'
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/scan.go b/vendor/github.com/redis/go-redis/v9/internal/proto/scan.go
new file mode 100644
index 0000000..5223069
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/proto/scan.go
@@ -0,0 +1,185 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// Scan parses bytes `b` to `v` with appropriate type.
+//
+//nolint:gocyclo
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return fmt.Errorf("redis: Scan(nil)")
+ case *string:
+ *v = util.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = util.Atoi(b)
+ return err
+ case *int8:
+ n, err := util.ParseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := util.ParseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := util.ParseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := util.ParseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := util.ParseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := util.ParseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := util.ParseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return err
+ case *float64:
+ var err error
+ *v, err = util.ParseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case *time.Time:
+ var err error
+ *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+ return err
+ case *time.Duration:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = time.Duration(n)
+ return nil
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ case *net.IP:
+ *v = b
+ return nil
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ next := makeSliceNextElemFunc(v)
+ for i, s := range data {
+ elem := next()
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ elem := v.Index(v.Len() - 1)
+ if elem.IsNil() {
+ elem.Set(reflect.New(elemType))
+ }
+ return elem.Elem()
+ }
+
+ elem := reflect.New(elemType)
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+ }
+
+ zero := reflect.Zero(elemType)
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ v.Set(reflect.Append(v, zero))
+ return v.Index(v.Len() - 1)
+ }
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go
new file mode 100644
index 0000000..38e66c6
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go
@@ -0,0 +1,242 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ // WriteString implement io.StringWriter.
+ WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+ writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+ return &Writer{
+ writer: wr,
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ if err := w.WriteByte(RespArray); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(args)); err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ if err := w.WriteArg(arg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case *string:
+ if v == nil {
+ return w.string("")
+ }
+ return w.string(*v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case *int:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(int64(*v))
+ case int8:
+ return w.int(int64(v))
+ case *int8:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(int64(*v))
+ case int16:
+ return w.int(int64(v))
+ case *int16:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(int64(*v))
+ case int32:
+ return w.int(int64(v))
+ case *int32:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(int64(*v))
+ case int64:
+ return w.int(v)
+ case *int64:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(*v)
+ case uint:
+ return w.uint(uint64(v))
+ case *uint:
+ if v == nil {
+ return w.uint(0)
+ }
+ return w.uint(uint64(*v))
+ case uint8:
+ return w.uint(uint64(v))
+ case *uint8:
+ if v == nil {
+ return w.string("")
+ }
+ return w.uint(uint64(*v))
+ case uint16:
+ return w.uint(uint64(v))
+ case *uint16:
+ if v == nil {
+ return w.uint(0)
+ }
+ return w.uint(uint64(*v))
+ case uint32:
+ return w.uint(uint64(v))
+ case *uint32:
+ if v == nil {
+ return w.uint(0)
+ }
+ return w.uint(uint64(*v))
+ case uint64:
+ return w.uint(v)
+ case *uint64:
+ if v == nil {
+ return w.uint(0)
+ }
+ return w.uint(*v)
+ case float32:
+ return w.float(float64(v))
+ case *float32:
+ if v == nil {
+ return w.float(0)
+ }
+ return w.float(float64(*v))
+ case float64:
+ return w.float(v)
+ case *float64:
+ if v == nil {
+ return w.float(0)
+ }
+ return w.float(*v)
+ case bool:
+ if v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case *bool:
+ if v == nil {
+ return w.int(0)
+ }
+ if *v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case time.Time:
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
+ case *time.Time:
+ if v == nil {
+ v = &time.Time{}
+ }
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
+ case time.Duration:
+ return w.int(v.Nanoseconds())
+ case *time.Duration:
+ if v == nil {
+ return w.int(0)
+ }
+ return w.int(v.Nanoseconds())
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ case net.IP:
+ return w.bytes(v)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ if err := w.WriteByte(RespString); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(b)); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ if err := w.WriteByte('\r'); err != nil {
+ return err
+ }
+ return w.WriteByte('\n')
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/rand/rand.go b/vendor/github.com/redis/go-redis/v9/internal/rand/rand.go
new file mode 100644
index 0000000..2edccba
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/rand/rand.go
@@ -0,0 +1,50 @@
+package rand
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+ src rand.Source
+ mu sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+ s.mu.Lock()
+ n := s.src.Int63()
+ s.mu.Unlock()
+ return n
+}
+
+func (s *source) Seed(seed int64) {
+ s.mu.Lock()
+ s.src.Seed(seed)
+ s.mu.Unlock()
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util.go b/vendor/github.com/redis/go-redis/v9/internal/util.go
new file mode 100644
index 0000000..f77775f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util.go
@@ -0,0 +1,113 @@
+package internal
+
+import (
+ "context"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func ReplaceSpaces(s string) string {
+ return strings.ReplaceAll(s, " ", "-")
+}
+
+func GetAddr(addr string) string {
+ ind := strings.LastIndexByte(addr, ':')
+ if ind == -1 {
+ return ""
+ }
+
+ if strings.IndexByte(addr, '.') != -1 {
+ return addr
+ }
+
+ if addr[0] == '[' {
+ return addr
+ }
+ return net.JoinHostPort(addr[:ind], addr[ind+1:])
+}
+
+func ToInteger(val interface{}) int {
+ switch v := val.(type) {
+ case int:
+ return v
+ case int64:
+ return int(v)
+ case string:
+ i, _ := strconv.Atoi(v)
+ return i
+ default:
+ return 0
+ }
+}
+
+func ToFloat(val interface{}) float64 {
+ switch v := val.(type) {
+ case float64:
+ return v
+ case string:
+ f, _ := strconv.ParseFloat(v, 64)
+ return f
+ default:
+ return 0.0
+ }
+}
+
+func ToString(val interface{}) string {
+ if str, ok := val.(string); ok {
+ return str
+ }
+ return ""
+}
+
+func ToStringSlice(val interface{}) []string {
+ if arr, ok := val.([]interface{}); ok {
+ result := make([]string, len(arr))
+ for i, v := range arr {
+ result[i] = ToString(v)
+ }
+ return result
+ }
+ return nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/convert.go b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go
new file mode 100644
index 0000000..d326d50
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go
@@ -0,0 +1,30 @@
+package util
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// ParseStringToFloat parses a Redis RESP3 float reply into a Go float64,
+// handling "inf", "-inf", and "nan" per Redis conventions.
+func ParseStringToFloat(s string) (float64, error) {
+ switch s {
+ case "inf":
+ return math.Inf(1), nil
+ case "-inf":
+ return math.Inf(-1), nil
+ case "nan", "-nan":
+ return math.NaN(), nil
+ }
+ return strconv.ParseFloat(s, 64)
+}
+
+// MustParseFloat is like ParseStringToFloat but panics on parse errors.
+func MustParseFloat(s string) float64 {
+ f, err := ParseStringToFloat(s)
+ if err != nil {
+ panic(fmt.Sprintf("redis: failed to parse float %q: %v", s, err))
+ }
+ return f
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/safe.go b/vendor/github.com/redis/go-redis/v9/internal/util/safe.go
new file mode 100644
index 0000000..8178f86
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/safe.go
@@ -0,0 +1,11 @@
+//go:build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/strconv.go b/vendor/github.com/redis/go-redis/v9/internal/util/strconv.go
new file mode 100644
index 0000000..db50338
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+ return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/type.go b/vendor/github.com/redis/go-redis/v9/internal/util/type.go
new file mode 100644
index 0000000..a7ea712
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/type.go
@@ -0,0 +1,5 @@
+package util
+
+func ToPtr[T any](v T) *T {
+ return &v
+}
diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/unsafe.go b/vendor/github.com/redis/go-redis/v9/internal/util/unsafe.go
new file mode 100644
index 0000000..cbcd2cc
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/internal/util/unsafe.go
@@ -0,0 +1,22 @@
+//go:build !appengine
+
+package util
+
+import (
+ "unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/vendor/github.com/redis/go-redis/v9/iterator.go b/vendor/github.com/redis/go-redis/v9/iterator.go
new file mode 100644
index 0000000..cd1a828
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/iterator.go
@@ -0,0 +1,66 @@
+package redis
+
+import (
+ "context"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+type ScanIterator struct {
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ return it.cmd.Err()
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+ // Instantly return on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ switch it.cmd.args[0] {
+ case "scan", "qscan":
+ it.cmd.args[1] = it.cmd.cursor
+ default:
+ it.cmd.args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(ctx, it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ return v
+}
diff --git a/vendor/github.com/redis/go-redis/v9/json.go b/vendor/github.com/redis/go-redis/v9/json.go
new file mode 100644
index 0000000..b3cadf4
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/json.go
@@ -0,0 +1,599 @@
+package redis
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+// -------------------------------------------
+
+type JSONCmdable interface {
+ JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd
+ JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd
+ JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd
+ JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd
+ JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd
+ JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd
+ JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd
+ JSONClear(ctx context.Context, key, path string) *IntCmd
+ JSONDebugMemory(ctx context.Context, key, path string) *IntCmd
+ JSONDel(ctx context.Context, key, path string) *IntCmd
+ JSONForget(ctx context.Context, key, path string) *IntCmd
+ JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd
+ JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd
+ JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd
+ JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd
+ JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd
+ JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd
+ JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd
+ JSONObjKeys(ctx context.Context, key, path string) *SliceCmd
+ JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd
+ JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd
+ JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd
+ JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd
+ JSONType(ctx context.Context, key, path string) *JSONSliceCmd
+}
+
+type JSONSetArgs struct {
+ Key string
+ Path string
+ Value interface{}
+}
+
+type JSONArrIndexArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONArrTrimArgs struct {
+ Start int
+ Stop *int
+}
+
+type JSONCmd struct {
+ baseCmd
+ val string
+ expanded interface{}
+}
+
+var _ Cmder = (*JSONCmd)(nil)
+
+func newJSONCmd(ctx context.Context, args ...interface{}) *JSONCmd {
+ return &JSONCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *JSONCmd) Val() string {
+ if len(cmd.val) == 0 && cmd.expanded != nil {
+ val, err := json.Marshal(cmd.expanded)
+ if err != nil {
+ cmd.SetErr(err)
+ return ""
+ }
+ return string(val)
+
+ } else {
+ return cmd.val
+ }
+}
+
+func (cmd *JSONCmd) Result() (string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *JSONCmd) Expanded() (interface{}, error) {
+ if len(cmd.val) != 0 && cmd.expanded == nil {
+ err := json.Unmarshal([]byte(cmd.val), &cmd.expanded)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return cmd.expanded, nil
+}
+
+func (cmd *JSONCmd) readReply(rd *proto.Reader) error {
+ // nil response from JSON.(M)GET (cmd.baseCmd.err will be "redis: nil")
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = ""
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+
+ size, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ expanded := make([]interface{}, size)
+
+ for i := 0; i < size; i++ {
+ if expanded[i], err = rd.ReadReply(); err != nil {
+ return err
+ }
+ }
+ cmd.expanded = expanded
+
+ } else {
+ if str, err := rd.ReadString(); err != nil && err != Nil {
+ return err
+ } else if str == "" || err == Nil {
+ cmd.val = ""
+ } else {
+ cmd.val = str
+ }
+ }
+
+ return nil
+}
+
+// -------------------------------------------
+
+type JSONSliceCmd struct {
+ baseCmd
+ val []interface{}
+}
+
+func NewJSONSliceCmd(ctx context.Context, args ...interface{}) *JSONSliceCmd {
+ return &JSONSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *JSONSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *JSONSliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *JSONSliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *JSONSliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *JSONSliceCmd) readReply(rd *proto.Reader) error {
+ if cmd.baseCmd.Err() == Nil {
+ cmd.val = nil
+ return Nil
+ }
+
+ if readType, err := rd.PeekReplyType(); err != nil {
+ return err
+ } else if readType == proto.RespArray {
+ response, err := rd.ReadReply()
+ if err != nil {
+ return nil
+ } else {
+ cmd.val = response.([]interface{})
+ }
+
+ } else {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]interface{}, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ }
+ return nil
+}
+
+/*******************************************************************************
+*
+* IntPointerSliceCmd
+* used to represent a RedisJSON response where the result is either an integer or nil
+*
+*******************************************************************************/
+
+type IntPointerSliceCmd struct {
+ baseCmd
+ val []*int64
+}
+
+// NewIntPointerSliceCmd initialises an IntPointerSliceCmd
+func NewIntPointerSliceCmd(ctx context.Context, args ...interface{}) *IntPointerSliceCmd {
+ return &IntPointerSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntPointerSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntPointerSliceCmd) SetVal(val []*int64) {
+ cmd.val = val
+}
+
+func (cmd *IntPointerSliceCmd) Val() []*int64 {
+ return cmd.val
+}
+
+func (cmd *IntPointerSliceCmd) Result() ([]*int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntPointerSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*int64, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ val, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ } else if err != Nil {
+ cmd.val[i] = &val
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// JSONArrAppend adds the provided JSON values to the end of the array at the given path.
+// For more information, see https://redis.io/commands/json.arrappend
+func (c cmdable) JSONArrAppend(ctx context.Context, key, path string, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRAPPEND", key, path}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndex searches for the first occurrence of the provided JSON value in the array at the given path.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndex(ctx context.Context, key, path string, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrIndexWithArgs searches for the first occurrence of a JSON value in an array while allowing the start and
+// stop options to be provided.
+// For more information, see https://redis.io/commands/json.arrindex
+func (c cmdable) JSONArrIndexWithArgs(ctx context.Context, key, path string, options *JSONArrIndexArgs, value ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINDEX", key, path}
+ args = append(args, value...)
+
+ if options != nil {
+ args = append(args, options.Start)
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrInsert inserts the JSON values into the array at the specified path before the index (shifts to the right).
+// For more information, see https://redis.io/commands/json.arrinsert
+func (c cmdable) JSONArrInsert(ctx context.Context, key, path string, index int64, values ...interface{}) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRINSERT", key, path, index}
+ args = append(args, values...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrLen reports the length of the JSON array at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.arrlen
+func (c cmdable) JSONArrLen(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRLEN", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrPop removes and returns an element from the specified index in the array.
+// For more information, see https://redis.io/commands/json.arrpop
+func (c cmdable) JSONArrPop(ctx context.Context, key, path string, index int) *StringSliceCmd {
+ args := []interface{}{"JSON.ARRPOP", key, path, index}
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrim trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrim(ctx context.Context, key, path string) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONArrTrimWithArgs trims an array to contain only the specified inclusive range of elements.
+// For more information, see https://redis.io/commands/json.arrtrim
+func (c cmdable) JSONArrTrimWithArgs(ctx context.Context, key, path string, options *JSONArrTrimArgs) *IntSliceCmd {
+ args := []interface{}{"JSON.ARRTRIM", key, path}
+
+ if options != nil {
+ args = append(args, options.Start)
+
+ if options.Stop != nil {
+ args = append(args, *options.Stop)
+ }
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONClear clears container values (arrays/objects) and sets numeric values to 0.
+// For more information, see https://redis.io/commands/json.clear
+func (c cmdable) JSONClear(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.CLEAR", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONDebugMemory reports a value's memory usage in bytes (unimplemented)
+// For more information, see https://redis.io/commands/json.debug-memory
+func (c cmdable) JSONDebugMemory(ctx context.Context, key, path string) *IntCmd {
+ panic("not implemented")
+}
+
+// JSONDel deletes a value.
+// For more information, see https://redis.io/commands/json.del
+func (c cmdable) JSONDel(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.DEL", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONForget deletes a value.
+// For more information, see https://redis.io/commands/json.forget
+func (c cmdable) JSONForget(ctx context.Context, key, path string) *IntCmd {
+ args := []interface{}{"JSON.FORGET", key, path}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONGet returns the value at path in JSON serialized form. JSON.GET returns an
+// array of strings. This function parses out the wrapping array but leaves the
+// internal strings unprocessed by default (see Val())
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGet(ctx context.Context, key string, paths ...string) *JSONCmd {
+ args := make([]interface{}, len(paths)+2)
+ args[0] = "JSON.GET"
+ args[1] = key
+ for n, path := range paths {
+ args[n+2] = path
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type JSONGetArgs struct {
+ Indent string
+ Newline string
+ Space string
+}
+
+// JSONGetWithArgs - Retrieves the value of a key from a JSON document.
+// This function also allows for specifying additional options such as
+// Indent, Newline, and Space.
+// For more information - https://redis.io/commands/json.get/
+func (c cmdable) JSONGetWithArgs(ctx context.Context, key string, options *JSONGetArgs, paths ...string) *JSONCmd {
+ args := []interface{}{"JSON.GET", key}
+ if options != nil {
+ if options.Indent != "" {
+ args = append(args, "INDENT", options.Indent)
+ }
+ if options.Newline != "" {
+ args = append(args, "NEWLINE", options.Newline)
+ }
+ if options.Space != "" {
+ args = append(args, "SPACE", options.Space)
+ }
+ for _, path := range paths {
+ args = append(args, path)
+ }
+ }
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMerge merges a given JSON value into matching paths.
+// For more information, see https://redis.io/commands/json.merge
+func (c cmdable) JSONMerge(ctx context.Context, key, path string, value string) *StatusCmd {
+ args := []interface{}{"JSON.MERGE", key, path, value}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONMGet returns the values at the specified path from multiple key arguments.
+// Note - the arguments are reversed when compared with `JSON.MGET` as we want
+// to follow the pattern of having the last argument be variadic.
+// For more information, see https://redis.io/commands/json.mget
+func (c cmdable) JSONMGet(ctx context.Context, path string, keys ...string) *JSONSliceCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "JSON.MGET"
+ for n, key := range keys {
+ args[n+1] = key
+ }
+ args = append(args, path)
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
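+
+// A brief sketch of the reversed argument order (illustrative; assumes rdb and
+// ctx as above, with example keys):
+//
+// // The path comes first, the keys are variadic.
+// vals, err := rdb.JSONMGet(ctx, "$.name", "doc:1", "doc:2").Result()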
+
+// JSONMSetArgs sets or updates one or more JSON values according to the specified key-path-value triplets.
+// For more information, see https://redis.io/commands/json.mset
+func (c cmdable) JSONMSetArgs(ctx context.Context, docs []JSONSetArgs) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ for _, doc := range docs {
+ args = append(args, doc.Key, doc.Path, doc.Value)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd {
+ args := []interface{}{"JSON.MSET"}
+ args = append(args, params...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
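+
+// A short sketch of JSONMSetArgs with key/path/value triplets (illustrative;
+// assumes rdb and ctx, and uses the JSONSetArgs triplet type from this file):
+//
+// docs := []JSONSetArgs{
+//   {Key: "doc:1", Path: "$", Value: `{"name":"a"}`},
+//   {Key: "doc:2", Path: "$", Value: `{"name":"b"}`},
+// }
+// if err := rdb.JSONMSetArgs(ctx, docs).Err(); err != nil {
+//   // handle error
+// }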
+
+// JSONNumIncrBy increments the number value stored at the specified path by the provided number.
+// For more information, see https://redis.io/docs/latest/commands/json.numincrby/
+func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd {
+ args := []interface{}{"JSON.NUMINCRBY", key, path, value}
+ cmd := newJSONCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjKeys returns the keys in the object that's referenced by the specified path.
+// For more information, see https://redis.io/commands/json.objkeys
+func (c cmdable) JSONObjKeys(ctx context.Context, key, path string) *SliceCmd {
+ args := []interface{}{"JSON.OBJKEYS", key, path}
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONObjLen reports the number of keys in the JSON object at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.objlen
+func (c cmdable) JSONObjLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.OBJLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONSet sets the JSON value at the given path in the given key. The value must be something that
+// can be marshaled to JSON (using encoding/json) unless the argument is a string or a []byte, in which
+// case we assume it can be passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSet(ctx context.Context, key, path string, value interface{}) *StatusCmd {
+ return c.JSONSetMode(ctx, key, path, value, "")
+}
+
+// JSONSetMode sets the JSON value at the given path in the given key and allows the mode to be set
+// (the mode value must be "XX" or "NX"). The value must be something that can be marshaled to JSON
+// (using encoding/json) unless the argument is a string or []byte, in which case we assume it can be
+// passed directly as JSON.
+// For more information, see https://redis.io/commands/json.set
+func (c cmdable) JSONSetMode(ctx context.Context, key, path string, value interface{}, mode string) *StatusCmd {
+ var bytes []byte
+ var err error
+ switch v := value.(type) {
+ case string:
+ bytes = []byte(v)
+ case []byte:
+ bytes = v
+ default:
+ bytes, err = json.Marshal(v)
+ }
+ args := []interface{}{"JSON.SET", key, path, util.BytesToString(bytes)}
+ if mode != "" {
+ switch strings.ToUpper(mode) {
+ case "XX", "NX":
+ args = append(args, strings.ToUpper(mode))
+
+ default:
+ panic("redis: JSON.SET mode must be NX or XX")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ _ = c(ctx, cmd)
+ }
+ return cmd
+}
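+
+// A minimal sketch of JSONSet and JSONSetMode (illustrative; assumes rdb and
+// ctx, with example keys and values):
+//
+// // Non-string values are marshaled with encoding/json; strings and []byte are sent as-is.
+// err := rdb.JSONSet(ctx, "user:1", "$", map[string]interface{}{"name": "a"}).Err()
+// // Only set the path if it does not exist yet ("NX"); "XX" only updates existing paths.
+// err = rdb.JSONSetMode(ctx, "user:1", "$.nick", `"b"`, "NX").Err()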
+
+// JSONStrAppend appends the JSON-string values to the string at the specified path.
+// For more information, see https://redis.io/commands/json.strappend
+func (c cmdable) JSONStrAppend(ctx context.Context, key, path, value string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRAPPEND", key, path, value}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONStrLen reports the length of the JSON String at the specified path in the given key.
+// For more information, see https://redis.io/commands/json.strlen
+func (c cmdable) JSONStrLen(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.STRLEN", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONToggle toggles a Boolean value stored at the specified path.
+// For more information, see https://redis.io/commands/json.toggle
+func (c cmdable) JSONToggle(ctx context.Context, key, path string) *IntPointerSliceCmd {
+ args := []interface{}{"JSON.TOGGLE", key, path}
+ cmd := NewIntPointerSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// JSONType reports the type of JSON value at the specified path.
+// For more information, see https://redis.io/commands/json.type
+func (c cmdable) JSONType(ctx context.Context, key, path string) *JSONSliceCmd {
+ args := []interface{}{"JSON.TYPE", key, path}
+ cmd := NewJSONSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/list_commands.go b/vendor/github.com/redis/go-redis/v9/list_commands.go
new file mode 100644
index 0000000..24a0de0
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/list_commands.go
@@ -0,0 +1,289 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type ListCmdable interface {
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+ BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+}
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "blmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
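+
+// A short sketch of BLMPop (illustrative; assumes rdb and ctx, with example keys):
+//
+// // Block for up to five seconds waiting to pop two elements from the left
+// // side of the first non-empty list among key1 and key2.
+// key, vals, err := rdb.BLMPop(ctx, 5*time.Second, "left", 2, "key1", "key2").Result()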
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// LMPop pops one or more elements from the first non-empty list key among the provided key names.
+// direction: left or right, count: > 0
+// example: client.LMPop(ctx, "left", 3, "key1", "key2")
+func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "lmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(direction), "count", count)
+ cmd := NewKeyValuesCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
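+
+// A brief sketch of LPos with LPosArgs (illustrative; assumes rdb and ctx):
+//
+// // Index of the second occurrence of "c", scanning at most 100 elements.
+// idx, err := rdb.LPos(ctx, "mylist", "c", LPosArgs{Rank: 2, MaxLen: 100}).Result()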
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMove(
+ ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+ cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go
new file mode 100644
index 0000000..b87a234
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/options.go
@@ -0,0 +1,596 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/auth"
+ "github.com/redis/go-redis/v9/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+ // Allow returns nil if operation is allowed or an error otherwise.
+ // If operation is allowed client must ReportResult of the operation
+ // whether it is a success or a failure.
+ Allow() error
+ // ReportResult reports the result of the previously allowed operation.
+ // nil indicates a success, non-nil error usually indicates a failure.
+ ReportResult(result error)
+}
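+
+// A minimal sketch of a Limiter implementation (illustrative only; the
+// semaphore-based type below is an assumption, not part of this package):
+//
+// type concurrencyLimiter struct{ sem chan struct{} }
+//
+// func (l *concurrencyLimiter) Allow() error {
+//   select {
+//   case l.sem <- struct{}{}:
+//     return nil
+//   default:
+//     return errors.New("too many in-flight commands")
+//   }
+// }
+//
+// // ReportResult releases the slot taken by Allow, regardless of the outcome.
+// func (l *concurrencyLimiter) ReportResult(result error) { <-l.sem }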
+
+// Options keeps the settings to set up redis connection.
+type Options struct {
+
+ // Network type, either tcp or unix.
+ //
+ // default: is tcp.
+ Network string
+
+ // Addr is the address formatted as host:port
+ Addr string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Dialer creates a new network connection and has priority over
+ // the Network and Addr options.
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Hook that is called when a new connection is established.
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ // Protocol 2 or 3. Used to negotiate the RESP version with redis-server.
+ //
+ // default: 3.
+ Protocol int
+
+ // Username is used to authenticate the current connection
+ // with one of the connections defined in the ACL list when connecting
+ // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Username string
+
+ // Password is an optional password. Must match the password specified in the
+ // `requirepass` server configuration option (if connecting to a Redis 5.0 instance, or lower),
+ // or the User Password when connecting to a Redis 6.0 instance, or greater,
+ // that is using the Redis ACL system.
+ Password string
+
+ // CredentialsProvider allows the username and password to be updated
+ // before reconnecting. It should return the current username and password.
+ CredentialsProvider func() (username string, password string)
+
+ // CredentialsProviderContext is an enhanced version of CredentialsProvider,
+ // kept separate to maintain API compatibility; the two may be merged in a
+ // future release. If both are set, CredentialsProviderContext takes
+ // precedence and CredentialsProvider is ignored.
+ CredentialsProviderContext func(ctx context.Context) (username string, password string, err error)
+
+ // StreamingCredentialsProvider is used to retrieve the credentials
+ // for the connection from an external source. Those credentials may change
+ // during the connection lifetime. This is useful for managed identity
+ // scenarios where the credentials are retrieved from an external source.
+ //
+ // Currently, this is a placeholder for the future implementation.
+ StreamingCredentialsProvider auth.StreamingCredentialsProvider
+
+ // DB is the database to be selected after connecting to the server.
+ DB int
+
+ // MaxRetries is the maximum number of retries before giving up.
+ // -1 (not 0) disables retries.
+ //
+ // default: 3 retries
+ MaxRetries int
+
+ // MinRetryBackoff is the minimum backoff between each retry.
+ // -1 disables backoff.
+ //
+ // default: 8 milliseconds
+ MinRetryBackoff time.Duration
+
+ // MaxRetryBackoff is the maximum backoff between each retry.
+ // -1 disables backoff.
+ // default: 512 milliseconds;
+ MaxRetryBackoff time.Duration
+
+ // DialTimeout for establishing new connections.
+ //
+ // default: 5 seconds
+ DialTimeout time.Duration
+
+ // ReadTimeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking. Supported values:
+ //
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetReadDeadline calls completely.
+ //
+ // default: 3 seconds
+ ReadTimeout time.Duration
+
+ // WriteTimeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking. Supported values:
+ //
+ // - `-1` - no timeout (block indefinitely).
+ // - `-2` - disables SetWriteDeadline calls completely.
+ //
+ // default: 3 seconds
+ WriteTimeout time.Duration
+
+ // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines.
+ // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO type of connection pool.
+ //
+ // - true for FIFO pool
+ // - false for LIFO pool.
+ //
+ // Note that FIFO has slightly higher overhead compared to LIFO,
+ // but it helps close idle connections faster, reducing the pool size.
+ PoolFIFO bool
+
+ // PoolSize is the base number of socket connections.
+ // Default is 10 connections per available CPU as reported by runtime.GOMAXPROCS.
+ // If there are not enough connections in the pool, new connections will be
+ // allocated in excess of PoolSize; this can be limited with MaxActiveConns.
+ //
+ // default: 10 * runtime.GOMAXPROCS(0)
+ PoolSize int
+
+ // PoolTimeout is the amount of time client waits for connection if all connections
+ // are busy before returning an error.
+ //
+ // default: ReadTimeout + 1 second
+ PoolTimeout time.Duration
+
+ // MinIdleConns is the minimum number of idle connections, which is useful when establishing
+ // new connections is slow. The idle connections are not closed by default.
+ //
+ // default: 0
+ MinIdleConns int
+
+ // MaxIdleConns is the maximum number of idle connections.
+ // The idle connections are not closed by default.
+ //
+ // default: 0
+ MaxIdleConns int
+
+ // MaxActiveConns is the maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ // If the pool is full, the next call to Get() will block until a connection is released.
+ MaxActiveConns int
+
+ // ConnMaxIdleTime is the maximum amount of time a connection may be idle.
+ // Should be less than server's timeout.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If d <= 0, connections are not closed due to a connection's idle time.
+ // -1 disables idle timeout check.
+ //
+ // default: 30 minutes
+ ConnMaxIdleTime time.Duration
+
+ // ConnMaxLifetime is the maximum amount of time a connection may be reused.
+ //
+ // Expired connections may be closed lazily before reuse.
+ // If <= 0, connections are not closed due to a connection's age.
+ //
+ // default: 0
+ ConnMaxLifetime time.Duration
+
+ // TLSConfig to use. When set, TLS will be negotiated.
+ TLSConfig *tls.Config
+
+ // Limiter interface used to implement circuit breaker or rate limiter.
+ Limiter Limiter
+
+ // readOnly enables read only queries on slave/follower nodes.
+ readOnly bool
+
+ // DisableIndentity - Disable set-lib on connect.
+ //
+ // default: false
+ //
+ // Deprecated: Use DisableIdentity instead.
+ DisableIndentity bool
+
+ // DisableIdentity is used to disable CLIENT SETINFO command on connect.
+ //
+ // default: false
+ DisableIdentity bool
+
+ // IdentitySuffix - add suffix to client name. Default is empty.
+ IdentitySuffix string
+
+ // UnstableResp3 enables Unstable mode for Redis Search module with RESP3.
+ // When unstable mode is enabled, the client will use the RESP3 protocol and only be able to use RawResult.
+ UnstableResp3 bool
+}
+
+func (opt *Options) init() {
+ if opt.Addr == "" {
+ opt.Addr = "localhost:6379"
+ }
+ if opt.Network == "" {
+ if strings.HasPrefix(opt.Addr, "/") {
+ opt.Network = "unix"
+ } else {
+ opt.Network = "tcp"
+ }
+ }
+ if opt.Protocol < 2 {
+ opt.Protocol = 3
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = NewDialer(opt)
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
+ }
+ switch opt.ReadTimeout {
+ case -2:
+ opt.ReadTimeout = -1
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -2:
+ opt.WriteTimeout = -1
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ if opt.ReadTimeout > 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ } else {
+ opt.PoolTimeout = 30 * time.Second
+ }
+ }
+ if opt.ConnMaxIdleTime == 0 {
+ opt.ConnMaxIdleTime = 30 * time.Minute
+ }
+
+ switch opt.MaxRetries {
+ case -1:
+ opt.MaxRetries = 0
+ case 0:
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *Options) clone() *Options {
+ clone := *opt
+ return &clone
+}
+
+// NewDialer returns a function that will be used as the default dialer
+// when none is specified in Options.Dialer.
+func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+}
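+
+// A short sketch of supplying a custom Dialer through Options (illustrative;
+// the address and timeout are examples, and NewClient is defined elsewhere in
+// this package):
+//
+// opt := &Options{
+//   Addr: "localhost:6379",
+//   Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
+//     d := net.Dialer{Timeout: 2 * time.Second}
+//     return d.DialContext(ctx, network, addr)
+//   },
+// }
+// rdb := NewClient(opt)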
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by tcp socket and by unix socket.
+// Tcp connection:
+//
+// redis://<user>:<password>@<host>:<port>/<db_number>
+//
+// Unix connection:
+//
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+// - use "skip_verify=true" to ignore TLS certificate validation
+//
+// Examples:
+//
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
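+
+// A minimal usage sketch of ParseURL (illustrative; the URL is an example and
+// NewClient is defined elsewhere in this package):
+//
+// opt, err := ParseURL("redis://user:password@localhost:6379/0?dial_timeout=5s")
+// if err != nil {
+//   panic(err)
+// }
+// rdb := NewClient(opt)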
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ h, p := getHostPortWithDefaults(u)
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ var err error
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{
+ ServerName: h,
+ MinVersion: tls.VersionTLS12,
+ }
+ }
+
+ return setupConnParams(u, o)
+}
+
+// getHostPortWithDefaults is a helper function that splits the url into
+// a host and a port. If the host is missing, it defaults to localhost
+// and if the port is missing, it defaults to 6379.
+func getHostPortWithDefaults(u *url.URL) (string, string) {
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ if port == "" {
+ port = "6379"
+ }
+ return host, port
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+ o.Username, o.Password = getUserPassword(u)
+ return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+ q url.Values
+ err error
+}
+
+func (o *queryOptions) has(name string) bool {
+ return len(o.q[name]) > 0
+}
+
+func (o *queryOptions) string(name string) string {
+ vs := o.q[name]
+ if len(vs) == 0 {
+ return ""
+ }
+ delete(o.q, name) // enable detection of unknown parameters
+ return vs[len(vs)-1]
+}
+
+func (o *queryOptions) strings(name string) []string {
+ vs := o.q[name]
+ delete(o.q, name)
+ return vs
+}
+
+func (o *queryOptions) int(name string) int {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ i, err := strconv.Atoi(s)
+ if err == nil {
+ return i
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ // try plain number first
+ if i, err := strconv.Atoi(s); err == nil {
+ if i <= 0 {
+ // disable timeouts
+ return -1
+ }
+ return time.Duration(i) * time.Second
+ }
+ dur, err := time.ParseDuration(s)
+ if err == nil {
+ return dur
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+ switch s := o.string(name); s {
+ case "true", "1":
+ return true
+ case "false", "0", "":
+ return false
+ default:
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option value in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ if q.has("conn_max_idle_time") {
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ } else {
+ o.ConnMaxIdleTime = q.duration("idle_timeout")
+ }
+ if q.has("conn_max_lifetime") {
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ } else {
+ o.ConnMaxLifetime = q.duration("max_conn_age")
+ }
+ if q.err != nil {
+ return nil, q.err
+ }
+ if o.TLSConfig != nil && q.has("skip_verify") {
+ o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify")
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(
+ opt *Options,
+ dialer func(ctx context.Context, network, addr string) (net.Conn, error),
+) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ DialTimeout: opt.DialTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ })
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go
new file mode 100644
index 0000000..6c6b756
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster.go
@@ -0,0 +1,2002 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "net/url"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/auth"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+const (
+ minLatencyMeasurementInterval = 10 * time.Second
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create a cluster of standalone Redis servers
+ // and load-balance read/write operations between masters and slaves.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ CredentialsProvider func() (username string, password string)
+ CredentialsProviderContext func(ctx context.Context) (username string, password string, err error)
+ StreamingCredentialsProvider auth.StreamingCredentialsProvider
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int // applies per cluster node and not for the whole cluster
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ // DisableIndentity - Disable set-lib on connect.
+ //
+ // default: false
+ //
+ // Deprecated: Use DisableIdentity instead.
+ DisableIndentity bool
+
+ // DisableIdentity is used to disable CLIENT SETINFO command on connect.
+ //
+ // default: false
+ DisableIdentity bool
+
+ IdentitySuffix string // Add suffix to client name. Default is empty.
+
+ // UnstableResp3 enables Unstable mode for Redis Search module with RESP3.
+ UnstableResp3 bool
+}
+
+func (opt *ClusterOptions) init() {
+ switch opt.MaxRedirects {
+ case -1:
+ opt.MaxRedirects = 0
+ case 0:
+ opt.MaxRedirects = 3
+ }
+
+ if opt.RouteByLatency || opt.RouteRandomly {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>
+// or
+// rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the "addr" query parameter one or more times, e.g.:
+//
+// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"]
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
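+
+// A minimal usage sketch of ParseClusterURL (illustrative; the URL and extra
+// addresses are examples):
+//
+// opt, err := ParseClusterURL("redis://user:password@localhost:6789?addr=localhost:6790&addr=localhost:6791")
+// if err != nil {
+//   panic(err)
+// }
+// rdb := NewClusterClient(opt)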
+
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option value in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ CredentialsProvider: opt.CredentialsProvider,
+ CredentialsProviderContext: opt.CredentialsProviderContext,
+ StreamingCredentialsProvider: opt.StreamingCredentialsProvider,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ DisableIdentity: opt.DisableIdentity,
+ DisableIndentity: opt.DisableIdentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+
+ // last time the latency measurement was performed for the node, stored in nanoseconds
+ // from epoch
+ lastLatencyMeasurement int64 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+const maximumNodeLatency = 1 * time.Minute
+
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ successes := 0
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
+ }
+
+ var latency float64
+ if successes == 0 {
+ // If none of the pings worked, set latency to some arbitrarily high value so this node gets
+ // the least priority.
+ latency = float64((maximumNodeLatency) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+ n.SetLastLatencyMeasurement(time.Now())
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) LastLatencyMeasurement() int64 {
+ return atomic.LoadInt64(&n.lastLatencyMeasurement)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+func (n *clusterNode) SetLastLatencyMeasurement(t time.Time) {
+ for {
+ v := atomic.LoadInt64(&n.lastLatencyMeasurement)
+ if t.UnixNano() < v || atomic.CompareAndSwapInt64(&n.lastLatencyMeasurement, v, t.UnixNano()) {
+ break
+ }
+ }
+}
+
+func (n *clusterNode) Loading() bool {
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+
+ err := n.Client.Ping(ctx).Err()
+ return err != nil && isLoadingError(err)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+ onNewNode []func(rdb *Client)
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+
+ c.mu.RLock()
+ closed := c.closed //nolint:ifshort
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = make([]string, len(c.activeAddrs))
+ copy(addrs, c.activeAddrs)
+ } else {
+ addrs = make([]string, len(c.addrs))
+ copy(addrs, c.addrs)
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ now := time.Now()
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency && node.LastLatencyMeasurement() < now.Add(-minLatencyMeasurementInterval).UnixNano() {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ slave := nodes[1]
+ if !slave.Failing() && !slave.Loading() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() && !slave.Loading() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var allNodesFailing = true
+ var (
+ closestNonFailingNode *clusterNode
+ closestNode *clusterNode
+ minLatency time.Duration
+ )
+
+ // start minLatency at the maximum possible duration so any real measurement is lower
+ minLatency = time.Duration(math.MaxInt64)
+
+ for _, n := range nodes {
+ if closestNode == nil || n.Latency() < minLatency {
+ closestNode = n
+ minLatency = n.Latency()
+ if !n.Failing() {
+ closestNonFailingNode = n
+ allNodesFailing = false
+ }
+ }
+ }
+
+ // pick the healthy node with the lowest latency
+ if !allNodesFailing && closestNonFailingNode != nil {
+ return closestNonFailingNode, nil
+ }
+
+ // if all nodes are failing, we will pick the temporarily failing node with the lowest latency
+ if minLatency < maximumNodeLatency && closestNode != nil {
+ internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency")
+ return closestNode, nil
+ }
+
+ // If all nodes have the maximum latency (all pings are failing), return a random node across the cluster
+ internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster")
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ if len(nodes) == 1 {
+ return nodes[0], nil
+ }
+ randomNodes := rand.Perm(len(nodes))
+ for _, idx := range randomNodes {
+ if node := nodes[idx]; !node.Failing() {
+ return node, nil
+ }
+ }
+ return nodes[randomNodes[0]], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(context.Background())
+ if err != nil {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v == nil {
+ return c.Reload(ctx)
+ }
+
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload()
+ }
+ return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
+ cmdable
+ hooksMixin
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ if opt == nil {
+ panic("redis: NewClusterClient nil options")
+ }
+ opt.init()
+
+ c := &ClusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ }
+
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
+
+ return c
+}
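+
+// A minimal sketch of constructing a cluster client (illustrative; the seed
+// addresses are examples):
+//
+// rdb := NewClusterClient(&ClusterOptions{
+//   Addrs: []string{":7000", ":7001", ":7002"},
+// })
+// defer rdb.Close()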
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ slot := c.cmdSlot(cmd)
+ var node *clusterNode
+ var moved bool
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ // MOVED and ASK responses are not transient errors that require retry delay; they
+ // should be attempted immediately.
+ if attempt > 0 && !moved && !ask {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmd.Name(), slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ ask = false
+
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ c.state.LazyReload()
+
+ var err error
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
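+
+// Illustrative usage sketch (not part of the upstream library): pinging every
+// known node with ForEachShard. The callback runs concurrently, one goroutine
+// per node, so it must be safe for concurrent use.
+//
+//	err := rdb.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+//	    return shard.Ping(ctx).Err()
+//	})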
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ /*
+ * No node is reachable. It's possible that the IPs of all nodes have changed.
+ * Clear activeAddrs so the client can reconnect using the initially configured
+ * addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which gives it
+ * a chance to re-resolve the domain names and obtain the updated IP addresses.
+ */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ if !isContextError(err) {
+ node.MarkAsFailing()
+ }
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if isBadConn(err, false, node.Client.getAddr()) {
+ node.MarkAsFailing()
+ }
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isBadConn(err, false, node.Client.getAddr()) {
+ node.MarkAsFailing()
+ }
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ return err
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload()
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, _ *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
+ }
+
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload()
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
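+
+// Illustrative usage sketch (not part of the upstream library): optimistic
+// locking with Watch and a transactional pipeline. The key name is a
+// hypothetical placeholder; all watched keys must hash to the same slot.
+//
+//	increment := func(key string) error {
+//	    return rdb.Watch(ctx, func(tx *redis.Tx) error {
+//	        n, err := tx.Get(ctx, key).Int()
+//	        if err != nil && err != redis.Nil {
+//	            return err
+//	        }
+//	        _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+//	            pipe.Set(ctx, key, n+1, 0)
+//	            return nil
+//	        })
+//	        return err
+//	    }, key)
+//	}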
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
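+
+// Illustrative usage sketch (not part of the upstream library): subscribing to
+// a channel and consuming messages. The channel name is a hypothetical
+// placeholder.
+//
+//	pubsub := rdb.Subscribe(ctx, "mychannel")
+//	defer pubsub.Close()
+//
+//	for msg := range pubsub.Channel() {
+//	    fmt.Println(msg.Channel, msg.Payload)
+//	}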
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") {
+ return args[2].(int)
+ }
+
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
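+
+// Illustrative note (not part of the upstream library): keys that share a hash
+// tag map to the same slot, which is what lets multi-key commands and Watch
+// work across related keys. The key names are hypothetical placeholders.
+//
+//	hashtag.Slot("{user:1}:name") == hashtag.Slot("{user:1}:age") // same slot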
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdName string,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.opt.ReadOnly {
+ cmdInfo := c.cmdInfo(ctx, cmdName)
+ if cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful for running a particular Lua script that contains
+// only read-only commands on a replica.
+// Other Redis commands generally carry a flag marking them as read-only and are
+// automatically run on replica nodes when the ClusterOptions.ReadOnly flag is
+// set to true.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
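+
+// Illustrative usage sketch (not part of the upstream library): picking the
+// replica or master that serves a key's slot. The key name is a hypothetical
+// placeholder.
+//
+//	replica, err := rdb.SlaveForKey(ctx, "mykey")
+//	if err == nil {
+//	    _ = replica.Ping(ctx).Err()
+//	}
+//
+//	master, err := rdb.MasterForKey(ctx, "mykey")
+//	if err == nil {
+//	    _ = master.Ping(ctx).Err()
+//	}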
+
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster_commands.go b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
new file mode 100644
index 0000000..b13f8e7
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = size
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptLoad(ctx, script).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ if cmd.Val() == "" {
+ cmd.val = val
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
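+
+// Illustrative usage sketch (not part of the upstream library): loading a
+// script on every shard and invoking it by SHA. The script body and key name
+// are hypothetical placeholders.
+//
+//	sha, err := rdb.ScriptLoad(ctx, "return redis.call('GET', KEYS[1])").Result()
+//	if err == nil {
+//	    val, _ := rdb.EvalSha(ctx, sha, []string{"mykey"}).Result()
+//	    _ = val
+//	}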
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ return shard.ScriptFlush(ctx).Err()
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+
+ result := make([]bool, len(hashes))
+ for i := range result {
+ result[i] = true
+ }
+
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptExists(ctx, hashes...).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ for i, v := range val {
+ result[i] = result[i] && v
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = result
+ }
+ return nil
+ })
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pipeline.go b/vendor/github.com/redis/go-redis/v9/pipeline.go
new file mode 100644
index 0000000..1c11420
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pipeline.go
@@ -0,0 +1,121 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism for using the Redis pipelining technique.
+//
+// Pipelining speeds up processing dramatically by packing operations into
+// batches, sending them to Redis at once, and reading all the replies in a
+// single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected results
+// with large pipelines and small read/write timeouts.
+// The Redis client has retransmission logic for timeouts, so a pipeline may be
+// retransmitted and its commands may be executed more than once.
+// To avoid this, use read/write timeouts that are reasonably large for your
+// batch size, and/or use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+
+ // Len returns the number of commands in the pipeline that have not yet been executed.
+ Len() int
+
+ // Do is an API for executing any command.
+ // If a certain Redis command is not yet supported, you can use Do to execute it.
+ Do(ctx context.Context, args ...interface{}) *Cmd
+
+ // Process puts the command to be executed into the pipeline buffer.
+ Process(ctx context.Context, cmd Cmder) error
+
+ // Discard discards all commands in the buffer that have not yet been executed.
+ Discard()
+
+ // Exec sends all the commands buffered in the pipeline to the redis-server.
+ Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining.
+// Please note: it is not safe for concurrent use by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ exec pipelineExecer
+ cmds []Cmder
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+ return len(c.cmds)
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ if len(args) == 0 {
+ cmd.SetErr(errors.New("redis: please enter the command to be executed"))
+ return cmd
+ }
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.cmds = append(c.cmds, cmd)
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() {
+ c.cmds = c.cmds[:0]
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first failed
+// command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ return c.Exec(ctx)
+}
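+
+// Illustrative usage sketch (not part of the upstream library): batching
+// commands with Pipelined. The key names are hypothetical placeholders.
+//
+//	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+//	    pipe.Set(ctx, "k1", "v1", 0)
+//	    pipe.Incr(ctx, "counter")
+//	    return nil
+//	})
+//	// cmds holds one Cmder per queued command; err is the first failed command's error.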
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go
new file mode 100644
index 0000000..02ca263
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go
@@ -0,0 +1,1461 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type ProbabilisticCmdable interface {
+ BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFCard(ctx context.Context, key string) *IntCmd
+ BFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFInfo(ctx context.Context, key string) *BFInfoCmd
+ BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd
+ BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd
+ BFInfoSize(ctx context.Context, key string) *BFInfoCmd
+ BFInfoFilters(ctx context.Context, key string) *BFInfoCmd
+ BFInfoItems(ctx context.Context, key string) *BFInfoCmd
+ BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd
+ BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd
+ BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd
+ BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFCount(ctx context.Context, key string, element interface{}) *IntCmd
+ CFDel(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFInfo(ctx context.Context, key string) *CFInfoCmd
+ CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd
+ CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd
+ CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd
+ CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd
+ CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd
+ CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd
+ CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ CMSInfo(ctx context.Context, key string) *CMSInfoCmd
+ CMSInitByDim(ctx context.Context, key string, width, height int64) *StatusCmd
+ CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd
+ CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd
+ CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd
+ CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+
+ TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKInfo(ctx context.Context, key string) *TopKInfoCmd
+ TopKList(ctx context.Context, key string) *StringSliceCmd
+ TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd
+ TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ TopKReserve(ctx context.Context, key string, k int64) *StatusCmd
+ TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd
+
+ TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd
+ TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestCreate(ctx context.Context, key string) *StatusCmd
+ TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd
+ TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd
+ TDigestMax(ctx context.Context, key string) *FloatCmd
+ TDigestMin(ctx context.Context, key string) *FloatCmd
+ TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd
+ TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestReset(ctx context.Context, key string) *StatusCmd
+ TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd
+}
+
+type BFInsertOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+ NoCreate bool
+}
+
+type BFReserveOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+}
+
+type CFReserveOptions struct {
+ Capacity int64
+ BucketSize int64
+ MaxIterations int64
+ Expansion int64
+}
+
+type CFInsertOptions struct {
+ Capacity int64
+ NoCreate bool
+}
+
+// -------------------------------------------
+// Bloom filter commands
+//-------------------------------------------
+
+// BFReserve creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveExpansion creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying an expansion rate for the filter.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying that the filter should not scale.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveWithArgs creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying additional options such as expansion rate and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key}
+ if options != nil {
+ args = append(args, options.Error, options.Capacity)
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFAdd adds an item to a Bloom filter.
+// For more information - https://redis.io/commands/bf.add/
+func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFCard returns the cardinality of a Bloom filter -
+// number of items that were added to a Bloom filter and detected as unique
+// (items that caused at least one bit to be set in at least one sub-filter).
+// For more information - https://redis.io/commands/bf.card/
+func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd {
+ args := []interface{}{"BF.CARD", key}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFExists determines whether a given item was added to a Bloom filter.
+// For more information - https://redis.io/commands/bf.exists/
+func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
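+
+// Illustrative usage sketch (not part of the upstream library): reserving a
+// Bloom filter, adding an item and checking membership. The key name, error
+// rate and capacity are hypothetical placeholders.
+//
+//	if err := rdb.BFReserve(ctx, "bf:users", 0.01, 10000).Err(); err != nil {
+//	    // handle error (e.g. the filter already exists)
+//	}
+//	added, _ := rdb.BFAdd(ctx, "bf:users", "user-42").Result()
+//	exists, _ := rdb.BFExists(ctx, "bf:users", "user-42").Result()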
+
+// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP.
+// For more information - https://redis.io/commands/bf.loadchunk/
+func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"BF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFScanDump begins an incremental save of the Bloom filter.
+// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model.
+// For more information - https://redis.io/commands/bf.scandump/
+func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"BF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ScanDump struct {
+ Iter int64
+ Data string
+}
+
+type ScanDumpCmd struct {
+ baseCmd
+
+ val ScanDump
+}
+
+func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd {
+ return &ScanDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ScanDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ScanDumpCmd) SetVal(val ScanDump) {
+ cmd.val = val
+}
+
+func (cmd *ScanDumpCmd) Result() (ScanDump, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ScanDumpCmd) Val() ScanDump {
+ return cmd.val
+}
+
+func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = ScanDump{}
+ for i := 0; i < n; i++ {
+ iter, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ data, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Data = data
+ cmd.val.Iter = iter
+
+ }
+
+ return nil
+}
+
+// BFInfo returns information about a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BFInfo struct {
+ Capacity int64
+ Size int64
+ Filters int64
+ ItemsInserted int64
+ ExpansionRate int64
+}
+
+type BFInfoCmd struct {
+ baseCmd
+
+ val BFInfo
+}
+
+func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd {
+ return &BFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BFInfoCmd) SetVal(val BFInfo) {
+ cmd.val = val
+}
+
+func (cmd *BFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BFInfoCmd) Val() BFInfo {
+ return cmd.val
+}
+
+func (cmd *BFInfoCmd) Result() (BFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ result := BFInfo{}
+
+ // Create a mapping from key names to pointers of struct fields
+ respMapping := map[string]*int64{
+ "Capacity": &result.Capacity,
+ "CAPACITY": &result.Capacity,
+ "Size": &result.Size,
+ "SIZE": &result.Size,
+ "Number of filters": &result.Filters,
+ "FILTERS": &result.Filters,
+ "Number of items inserted": &result.ItemsInserted,
+ "ITEMS": &result.ItemsInserted,
+ "Expansion rate": &result.ExpansionRate,
+ "EXPANSION": &result.ExpansionRate,
+ }
+
+ // Helper function to read and assign a value based on the key
+ readAndAssignValue := func(key string) error {
+ fieldPtr, exists := respMapping[key]
+ if !exists {
+ return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key)
+ }
+
+ // Read the integer and assign to the field via pointer dereferencing
+ val, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ *fieldPtr = val
+ return nil
+ }
+
+ readType, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ if len(cmd.args) > 2 && readType == proto.RespArray {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if key, ok := cmd.args[2].(string); ok && n == 1 {
+ if err := readAndAssignValue(key); err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("redis: BLOOM.INFO invalid argument key type")
+ }
+ } else {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if err := readAndAssignValue(key); err != nil {
+ return err
+ }
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// BFInfoCapacity returns information about the capacity of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "CAPACITY")
+}
+
+// BFInfoSize returns information about the size of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "SIZE")
+}
+
+// BFInfoFilters returns information about the filters of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "FILTERS")
+}
+
+// BFInfoItems returns information about the items of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "ITEMS")
+}
+
+// BFInfoExpansion returns information about the expansion rate of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "EXPANSION")
+}
+
+// BFInfoArg returns information about a specific option of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key, option}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFInsert inserts elements into a Bloom filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.insert/
+func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.INSERT", key}
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.Error != 0 {
+ args = append(args, "ERROR", options.Error)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMAdd adds multiple elements to a Bloom filter.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/bf.madd/
+func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MADD", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMExists checks whether multiple elements exist in a Bloom filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/bf.mexists/
+func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MEXISTS", key}
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// Cuckoo filter commands
+//-------------------------------------------
+
+// CFReserve creates an empty Cuckoo filter with the specified capacity.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveWithArgs creates an empty Cuckoo filter with the specified options.
+// This function allows for specifying additional options such as bucket size and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, options.Capacity}
+ if options.BucketSize != 0 {
+ args = append(args, "BUCKETSIZE", options.BucketSize)
+ }
+ if options.MaxIterations != 0 {
+ args = append(args, "MAXITERATIONS", options.MaxIterations)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAdd adds an element to a Cuckoo filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.add/
+func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.addnx/
+func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADDNX", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter.
+// For more information - https://redis.io/commands/cf.count/
+func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd {
+ args := []interface{}{"CF.COUNT", key, element}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFDel deletes an item once from the cuckoo filter.
+// For more information - https://redis.io/commands/cf.del/
+func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.DEL", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFExists determines whether an item may exist in the Cuckoo Filter or not.
+// For more information - https://redis.io/commands/cf.exists/
+func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
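+
+// Illustrative usage sketch (not part of the upstream library): reserving a
+// Cuckoo filter and working with a single item. The key name and capacity are
+// hypothetical placeholders.
+//
+//	_ = rdb.CFReserve(ctx, "cf:sessions", 10000).Err()
+//	_ = rdb.CFAdd(ctx, "cf:sessions", "session-1").Err()
+//	exists, _ := rdb.CFExists(ctx, "cf:sessions", "session-1").Result()
+//	_ = rdb.CFDel(ctx, "cf:sessions", "session-1").Err()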
+
+// CFLoadChunk restores a filter previously saved using SCANDUMP.
+// For more information - https://redis.io/commands/cf.loadchunk/
+func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"CF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFScanDump begins an incremental save of the cuckoo filter.
+// For more information - https://redis.io/commands/cf.scandump/
+func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"CF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CFInfo struct {
+ Size int64
+ NumBuckets int64
+ NumFilters int64
+ NumItemsInserted int64
+ NumItemsDeleted int64
+ BucketSize int64
+ ExpansionRate int64
+ MaxIteration int64
+}
+
+type CFInfoCmd struct {
+ baseCmd
+
+ val CFInfo
+}
+
+func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd {
+ return &CFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CFInfoCmd) SetVal(val CFInfo) {
+ cmd.val = val
+}
+
+func (cmd *CFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CFInfoCmd) Val() CFInfo {
+ return cmd.val
+}
+
+func (cmd *CFInfoCmd) Result() (CFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of buckets":
+ result.NumBuckets, err = rd.ReadInt()
+ case "Number of filters":
+ result.NumFilters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.NumItemsInserted, err = rd.ReadInt()
+ case "Number of items deleted":
+ result.NumItemsDeleted, err = rd.ReadInt()
+ case "Bucket size":
+ result.BucketSize, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ case "Max iterations":
+ result.MaxIteration, err = rd.ReadInt()
+
+ default:
+ return fmt.Errorf("redis: CF.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CFInfo returns information about a Cuckoo filter.
+// For more information - https://redis.io/commands/cf.info/
+func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd {
+ args := []interface{}{"CF.INFO", key}
+ cmd := NewCFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsert inserts elements into a Cuckoo filter.
+// This function also allows for specifying additional options such as capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insert/
+func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.INSERT", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// Returns an array of integers indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insertnx/
+func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CF.INSERTNX", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) getCfInsertWithArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} {
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ return args
+}
+
+// CFMExists checks whether multiple elements exist in a Cuckoo filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/cf.mexists/
+func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.MEXISTS", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// CMS commands
+//-------------------------------------------
+
+// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter.
+// Returns an array of integers representing the updated count of each item.
+// For more information - https://redis.io/commands/cms.incrby/
+func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "CMS.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CMSInfo struct {
+ Width int64
+ Depth int64
+ Count int64
+}
+
+type CMSInfoCmd struct {
+ baseCmd
+
+ val CMSInfo
+}
+
+func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd {
+ return &CMSInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CMSInfoCmd) SetVal(val CMSInfo) {
+ cmd.val = val
+}
+
+func (cmd *CMSInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CMSInfoCmd) Val() CMSInfo {
+ return cmd.val
+}
+
+func (cmd *CMSInfoCmd) Result() (CMSInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CMSInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "count":
+ result.Count, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CMS.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CMSInfo returns information about a Count-Min Sketch filter.
+// For more information - https://redis.io/commands/cms.info/
+func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd {
+ args := []interface{}{"CMS.INFO", key}
+ cmd := NewCMSInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions.
+// For more information - https://redis.io/commands/cms.initbydim/
+func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYDIM", key, width, depth}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByProb creates an empty Count-Min Sketch with dimensions derived from the specified error rate and error probability.
+// For more information - https://redis.io/commands/cms.initbyprob/
+func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMerge merges multiple Count-Min Sketch filters into a single filter.
+// The destination sketch must already be initialized, and all sketches must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)}
+ for _, s := range sourceKeys {
+ args = append(args, s)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single filter with weights for each source filter.
+// The destination sketch must already be initialized, and all sketches must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd {
+ args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1))
+ args = append(args, "CMS.MERGE", destKey, len(sourceKeys))
+
+ if len(sourceKeys) > 0 {
+ sk := make([]interface{}, len(sourceKeys))
+ sw := make([]interface{}, len(sourceKeys))
+
+ i := 0
+ for k, w := range sourceKeys {
+ sk[i] = k
+ sw[i] = w
+ i++
+ }
+
+ args = append(args, sk...)
+ args = append(args, "WEIGHTS")
+ args = append(args, sw...)
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
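+
+// Usage sketch (illustrative only): merging two initialized sketches into an
+// already-initialized destination with per-source weights might look like:
+//
+// err := rdb.CMSMergeWithWeight(ctx, "cms:total", map[string]int64{
+//     "cms:day1": 1,
+//     "cms:day2": 2,
+// }).Err()
+// _ = err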
+
+// CMSQuery returns count for item(s).
+// For more information - https://redis.io/commands/cms.query/
+func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CMS.QUERY", key}
+ args = append(args, elements...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
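+
+// Usage sketch (illustrative only): a typical Count-Min Sketch workflow is to
+// initialize the sketch, increment item counts in pairs of item and increment,
+// and then query the estimates:
+//
+// _ = rdb.CMSInitByDim(ctx, "cms:views", 2000, 5).Err()
+// _ = rdb.CMSIncrBy(ctx, "cms:views", "page:a", 10, "page:b", 3).Err()
+// counts, err := rdb.CMSQuery(ctx, "cms:views", "page:a", "page:b").Result()
+// _ = counts // estimated counts; may overestimate but never underestimate
+// _ = err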
+
+// -------------------------------------------
+// TopK commands
+//--------------------------------------------
+
+// TopKAdd adds one or more elements to a Top-K filter.
+// Returns an array of strings representing the items that were removed from the filter, if any.
+// For more information - https://redis.io/commands/topk.add/
+func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.ADD"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserve creates an empty Top-K filter with the specified number of top items to keep.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserveWithOptions creates an empty Top-K filter with the specified number of top items to keep and additional options.
+// This function allows for specifying additional options such as width, depth and decay.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
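+
+// Usage sketch (illustrative only): reserving a Top-K structure and tracking
+// heavy hitters might look like:
+//
+// _ = rdb.TopKReserveWithOptions(ctx, "topk:queries", 10, 8, 7, 0.9).Err()
+// dropped, _ := rdb.TopKAdd(ctx, "topk:queries", "q1", "q2").Result()
+// _ = dropped // items expelled from the top list, if any
+// top, _ := rdb.TopKList(ctx, "topk:queries").Result()
+// _ = top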
+
+type TopKInfo struct {
+ K int64
+ Width int64
+ Depth int64
+ Decay float64
+}
+
+type TopKInfoCmd struct {
+ baseCmd
+
+ val TopKInfo
+}
+
+func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd {
+ return &TopKInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TopKInfoCmd) SetVal(val TopKInfo) {
+ cmd.val = val
+}
+
+func (cmd *TopKInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TopKInfoCmd) Val() TopKInfo {
+ return cmd.val
+}
+
+func (cmd *TopKInfoCmd) Result() (TopKInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TopKInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "k":
+ result.K, err = rd.ReadInt()
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "decay":
+ result.Decay, err = rd.ReadFloat()
+ default:
+ return fmt.Errorf("redis: topk.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TopKInfo returns information about a Top-K filter.
+// For more information - https://redis.io/commands/topk.info/
+func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd {
+ args := []interface{}{"TOPK.INFO", key}
+
+ cmd := NewTopKInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKQuery checks whether one or more elements exist in a Top-K filter.
+// Returns an array of booleans indicating whether each element exists in the filter or not.
+// For more information - https://redis.io/commands/topk.query/
+func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.QUERY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKCount returns an estimate of the number of times an item may be in a Top-K filter.
+// For more information - https://redis.io/commands/topk.count/
+func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.COUNT"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKIncrBy increases the count of one or more items in a Top-K filter.
+// For more information - https://redis.io/commands/topk.incrby/
+func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKList returns all items in Top-K list.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd {
+ args := []interface{}{"TOPK.LIST", key}
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKListWithCount returns all items in Top-K list with their respective count.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd {
+ args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"}
+
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// t-digest commands
+// --------------------------------------------
+
+// TDigestAdd adds one or more elements to a t-Digest data structure.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.add/
+func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.ADD"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRank returns an array of values from a t-Digest data structure based on their rank.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrank/
+func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrevrank/
+func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYREVRANK"
+ args[1] = key
+
+ // Convert uint slice to []interface{}
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure.
+// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.cdf/
+func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.CDF"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreate creates an empty t-Digest data structure with default parameters.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter.
+// The compression parameter controls the accuracy and memory usage of the t-Digest.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
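+
+// Usage sketch (illustrative only): creating a t-Digest, adding observations,
+// and reading quantile estimates might look like:
+//
+// _ = rdb.TDigestCreate(ctx, "td:latency").Err()
+// _ = rdb.TDigestAdd(ctx, "td:latency", 12.5, 8.1, 30.2).Err()
+// q, err := rdb.TDigestQuantile(ctx, "td:latency", 0.5, 0.99).Result()
+// _ = q // estimated values at the 50th and 99th percentiles
+// _ = err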
+
+type TDigestInfo struct {
+ Compression int64
+ Capacity int64
+ MergedNodes int64
+ UnmergedNodes int64
+ MergedWeight int64
+ UnmergedWeight int64
+ Observations int64
+ TotalCompressions int64
+ MemoryUsage int64
+}
+
+type TDigestInfoCmd struct {
+ baseCmd
+
+ val TDigestInfo
+}
+
+func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd {
+ return &TDigestInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) {
+ cmd.val = val
+}
+
+func (cmd *TDigestInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TDigestInfoCmd) Val() TDigestInfo {
+ return cmd.val
+}
+
+func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TDigestInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Compression":
+ result.Compression, err = rd.ReadInt()
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Merged nodes":
+ result.MergedNodes, err = rd.ReadInt()
+ case "Unmerged nodes":
+ result.UnmergedNodes, err = rd.ReadInt()
+ case "Merged weight":
+ result.MergedWeight, err = rd.ReadInt()
+ case "Unmerged weight":
+ result.UnmergedWeight, err = rd.ReadInt()
+ case "Observations":
+ result.Observations, err = rd.ReadInt()
+ case "Total compressions":
+ result.TotalCompressions, err = rd.ReadInt()
+ case "Memory usage":
+ result.MemoryUsage, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: tdigest.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TDigestInfo returns information about a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.info/
+func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd {
+ args := []interface{}{"TDIGEST.INFO", key}
+
+ cmd := NewTDigestInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMax returns the maximum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.max/
+func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MAX", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestMergeOptions struct {
+ Compression int64
+ Override bool
+}
+
+// TDigestMerge merges multiple t-Digest data structures into a single t-Digest.
+// This function also allows for specifying additional options such as compression and override behavior.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.merge/
+func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)}
+
+ for _, sourceKey := range sourceKeys {
+ args = append(args, sourceKey)
+ }
+
+ if options != nil {
+ if options.Compression != 0 {
+ args = append(args, "COMPRESSION", options.Compression)
+ }
+ if options.Override {
+ args = append(args, "OVERRIDE")
+ }
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMin returns the minimum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.min/
+func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MIN", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestQuantile returns, for each requested quantile, an estimate of the value that is smaller than that fraction of the observations in a t-Digest data structure.
+// Each quantile is a fraction between 0 and 1.
+// Returns an array of floats representing the estimated values or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.quantile/
+func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.QUANTILE"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.rank/
+func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.RANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestReset resets a t-Digest data structure to its initial state.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.reset/
+func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.RESET", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.revrank/
+func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.REVRANK"
+ args[1] = key
+
+ // Convert floatSlice to []interface{}
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure.
+// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values.
+// Returns a float representing the trimmed mean value or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.trimmed_mean/
+func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd {
+ args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go
new file mode 100644
index 0000000..2a0e7a8
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pubsub.go
@@ -0,0 +1,732 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+ schannels map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh *channel
+ allCh *channel
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) String() string {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ channels = append(channels, mapKeys(c.schannels)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(ctx, nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(ctx, channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(ctx, cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if len(c.schannels) > 0 {
+ err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
+ c.mu.Lock()
+ c.releaseConn(ctx, cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout, c.opt.Addr) {
+ c.reconnect(ctx, err)
+ }
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe subscribes the client to the specified channels. It returns
+// an empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe subscribes the client to the given patterns. It returns
+// an empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "ssubscribe", channels...)
+ if c.schannels == nil {
+ c.schannels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.schannels[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.channels {
+ delete(c.channels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(patterns) > 0 {
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ } else {
+ // Unsubscribe from all patterns.
+ for pattern := range c.patterns {
+ delete(c.patterns, pattern)
+ }
+ }
+
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
+ return err
+}
+
+// SUnsubscribe unsubscribes the client from the given shard channels,
+// or from all of them if none is given.
+func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.schannels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.schannels {
+ delete(c.schannels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "sunsubscribe", channels...)
+ return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(ctx, args...)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ cn, err := c.conn(ctx, nil)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+// Subscription is received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe" or "sunsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as result of a PING command issued by this client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message", "smessage":
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if message
+// is not received in time. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd(ctx)
+ }
+
+ // Don't hold the lock to allow subscriptions and pings.
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error ignoring Subscription and Pong
+// messages. This is low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
+
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is full and stays blocked for 1 minute, the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping cannot be received for 1 minute.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.msgCh = newChannel(c, opts...)
+ c.msgCh.initMsgChan()
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ return c.msgCh.msgCh
+}
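+
+// Usage sketch (illustrative only): a typical subscriber obtains the channel
+// once and ranges over it until the PubSub is closed:
+//
+// pubsub := rdb.Subscribe(ctx, "mychannel")
+// defer pubsub.Close()
+//
+// for msg := range pubsub.Channel() {
+//     fmt.Println(msg.Channel, msg.Payload)
+// }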
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.allCh = newChannel(c, opts...)
+ c.allCh.initAllChan()
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+ return func(c *channel) {
+ c.chanSize = size
+ }
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping Redis Server if it does not receive any messages within the interval.
+// To disable health check, use zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.checkInterval = d
+ }
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.chanSendTimeout = d
+ }
+}
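+
+// Usage sketch (illustrative only): the channel options can be combined when
+// the channel is created:
+//
+// ch := pubsub.Channel(
+//     WithChannelSize(500),
+//     WithChannelHealthCheckInterval(10*time.Second),
+//     WithChannelSendTimeout(30*time.Second),
+// )
+// _ = ch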
+
+type channel struct {
+ pubSub *PubSub
+
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+
+ chanSize int
+ chanSendTimeout time.Duration
+ checkInterval time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+ c := &channel{
+ pubSub: pubSub,
+
+ chanSize: 100,
+ chanSendTimeout: time.Minute,
+ checkInterval: 3 * time.Second,
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.checkInterval > 0 {
+ c.initHealthCheck()
+ }
+ return c
+}
+
+func (c *channel) initHealthCheck() {
+ ctx := context.TODO()
+ c.ping = make(chan struct{}, 1)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ for {
+ timer.Reset(c.checkInterval)
+ select {
+ case <-c.ping:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+ c.pubSub.mu.Lock()
+ c.pubSub.reconnect(ctx, pingErr)
+ c.pubSub.mu.Unlock()
+ }
+ case <-c.pubSub.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *channel) initMsgChan() {
+ ctx := context.TODO()
+ c.msgCh = make(chan *Message, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *channel) initAllChan() {
+ ctx := context.TODO()
+ c.allCh = make(chan interface{}, c.chanSize)
+
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.pubSub.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Pong:
+ // Ignore.
+ case *Subscription, *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
+ default:
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pubsub_commands.go b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
new file mode 100644
index 0000000..28622aa
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
@@ -0,0 +1,76 @@
+package redis
+
+import "context"
+
+type PubSubCmdable interface {
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+}
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "spublish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
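+
+// Usage sketch (illustrative only): introspecting active subscriptions might
+// look like:
+//
+// channels, _ := rdb.PubSubChannels(ctx, "news.*").Result()
+// counts, _ := rdb.PubSubNumSub(ctx, channels...).Result()
+// _ = counts // map of channel name to subscriber count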
+
+func (c cmdable) PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "shardchannels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "shardnumsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go
new file mode 100644
index 0000000..bafe82f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/redis.go
@@ -0,0 +1,970 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/auth"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// Scanner internal/hscan.Scanner exposed interface.
+type Scanner = hscan.Scanner
+
+// Nil reply returned by Redis when key does not exist.
+const Nil = proto.Nil
+
+// SetLogger set custom log
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ DialHook(next DialHook) DialHook
+ ProcessHook(next ProcessHook) ProcessHook
+ ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
+}
+
+type (
+ DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
+ ProcessHook func(ctx context.Context, cmd Cmder) error
+ ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
+)
+
+type hooksMixin struct {
+ hooksMu *sync.RWMutex
+
+ slice []Hook
+ initial hooks
+ current hooks
+}
+
+func (hs *hooksMixin) initHooks(hooks hooks) {
+ hs.hooksMu = new(sync.RWMutex)
+ hs.initial = hooks
+ hs.chain()
+}
+
+type hooks struct {
+ dial DialHook
+ process ProcessHook
+ pipeline ProcessPipelineHook
+ txPipeline ProcessPipelineHook
+}
+
+func (h *hooks) setDefaults() {
+ if h.dial == nil {
+ h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
+ }
+ if h.process == nil {
+ h.process = func(ctx context.Context, cmd Cmder) error { return nil }
+ }
+ if h.pipeline == nil {
+ h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+ if h.txPipeline == nil {
+ h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+}
+
+// AddHook appends a hook to the client's hook chain.
+// Hooks are executed during connection dialing, command execution, and pipeline execution,
+// in the order they were added (first in, first out).
+// Each hook must call the next hook, unless you want to terminate the execution of the command.
+// For example, you added hook-1, hook-2:
+//
+// client.AddHook(hook-1, hook-2)
+//
+// hook-1:
+//
+// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd Cmder) error {
+// print("hook-1 start")
+// next(ctx, cmd)
+// print("hook-1 end")
+// return nil
+// }
+// }
+//
+// hook-2:
+//
+// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd redis.Cmder) error {
+// print("hook-2 start")
+// next(ctx, cmd)
+// print("hook-2 end")
+// return nil
+// }
+// }
+//
+// The execution sequence is:
+//
+// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
+//
+// Please note that "next(ctx, cmd)" is essential: it calls the next hook.
+// If "next(ctx, cmd)" is not executed, the redis command will not be executed.
+func (hs *hooksMixin) AddHook(hook Hook) {
+ hs.slice = append(hs.slice, hook)
+ hs.chain()
+}
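+
+// A complete Hook implementation must provide all three methods, even if some
+// of them simply return next unchanged. Minimal sketch (the name "loggingHook"
+// is illustrative, not part of this package):
+//
+// type loggingHook struct{}
+//
+// func (loggingHook) DialHook(next redis.DialHook) redis.DialHook { return next }
+//
+// func (loggingHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+//     return func(ctx context.Context, cmd redis.Cmder) error {
+//         err := next(ctx, cmd)
+//         log.Printf("cmd=%s err=%v", cmd.Name(), err)
+//         return err
+//     }
+// }
+//
+// func (loggingHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+//     return next
+// }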
+
+func (hs *hooksMixin) chain() {
+ hs.initial.setDefaults()
+
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ hs.current.dial = hs.initial.dial
+ hs.current.process = hs.initial.process
+ hs.current.pipeline = hs.initial.pipeline
+ hs.current.txPipeline = hs.initial.txPipeline
+
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
+ hs.current.dial = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
+ hs.current.process = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
+ hs.current.pipeline = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
+ hs.current.txPipeline = wrapped
+ }
+ }
+}
+
+func (hs *hooksMixin) clone() hooksMixin {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ clone := *hs
+ l := len(clone.slice)
+ clone.slice = clone.slice[:l:l]
+ clone.hooksMu = new(sync.RWMutex)
+ return clone
+}
+
+func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmd)
+}
+
+func (hs *hooksMixin) withProcessPipelineHook(
+ ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
+) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmds)
+}
+
+func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
+ // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...)
+ // while this dialer is concurrently accessed by the background connection pool population
+ // routine when MinIdleConns > 0.
+ hs.hooksMu.RLock()
+ current := hs.current
+ hs.hooksMu.RUnlock()
+
+ return current.dial(ctx, network, addr)
+}
+
+func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
+ return hs.current.process(ctx, cmd)
+}
+
+func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.pipeline(ctx, cmds)
+}
+
+func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.txPipeline(ctx, cmds)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+ hooksMixin
+
+ onClose func() error // hook called when client is closed
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) newReAuthCredentialsListener(poolCn *pool.Conn) auth.CredentialsListener {
+ return auth.NewReAuthCredentialsListener(
+ c.reAuthConnection(poolCn),
+ c.onAuthenticationErr(poolCn),
+ )
+}
+
+func (c *baseClient) reAuthConnection(poolCn *pool.Conn) func(credentials auth.Credentials) error {
+ return func(credentials auth.Credentials) error {
+ var err error
+ username, password := credentials.BasicAuth()
+ ctx := context.Background()
+ connPool := pool.NewSingleConnPool(c.connPool, poolCn)
+ // hooksMixin are intentionally empty here
+ cn := newConn(c.opt, connPool, nil)
+
+ if username != "" {
+ err = cn.AuthACL(ctx, username, password).Err()
+ } else {
+ err = cn.Auth(ctx, password).Err()
+ }
+ return err
+ }
+}
+func (c *baseClient) onAuthenticationErr(poolCn *pool.Conn) func(err error) {
+ return func(err error) {
+ if err != nil {
+ if isBadConn(err, false, c.opt.Addr) {
+ // Close the connection to force a reconnection.
+ err := c.connPool.CloseConn(poolCn)
+ if err != nil {
+ internal.Logger.Printf(context.Background(), "redis: failed to close connection: %v", err)
+ // try to close the network connection directly
+ // so that no resource is leaked
+ err := poolCn.Close()
+ if err != nil {
+ internal.Logger.Printf(context.Background(), "redis: failed to close network connection: %v", err)
+ }
+ }
+ }
+ internal.Logger.Printf(context.Background(), "redis: re-authentication failed: %v", err)
+ }
+ }
+}
+
+func (c *baseClient) wrappedOnClose(newOnClose func() error) func() error {
+ onClose := c.onClose
+ return func() error {
+ var firstErr error
+ err := newOnClose()
+ // Even if we have an error we would like to execute the onClose hook
+ // if it exists. We will return the first error that occurred.
+ // This is to keep error handling consistent with the rest of the code.
+ if err != nil {
+ firstErr = err
+ }
+ if onClose != nil {
+ err = onClose()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+ }
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+
+ var err error
+ cn.Inited = true
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(c.opt, connPool, &c.hooksMixin)
+
+ username, password := "", ""
+ if c.opt.StreamingCredentialsProvider != nil {
+ credentials, unsubscribeFromCredentialsProvider, err := c.opt.StreamingCredentialsProvider.
+ Subscribe(c.newReAuthCredentialsListener(cn))
+ if err != nil {
+ return fmt.Errorf("failed to subscribe to streaming credentials: %w", err)
+ }
+ c.onClose = c.wrappedOnClose(unsubscribeFromCredentialsProvider)
+ cn.SetOnClose(unsubscribeFromCredentialsProvider)
+ username, password = credentials.BasicAuth()
+ } else if c.opt.CredentialsProviderContext != nil {
+ username, password, err = c.opt.CredentialsProviderContext(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to get credentials from context provider: %w", err)
+ }
+ } else if c.opt.CredentialsProvider != nil {
+ username, password = c.opt.CredentialsProvider()
+ } else if c.opt.Username != "" || c.opt.Password != "" {
+ username, password = c.opt.Username, c.opt.Password
+ }
+
+ // for redis-server versions that do not support the HELLO command,
+ // RESP2 will continue to be used.
+ if err = conn.Hello(ctx, c.opt.Protocol, username, password, c.opt.ClientName).Err(); err == nil {
+ // Authentication successful with HELLO command
+ } else if !isRedisError(err) {
+ // When the server responds with the RESP protocol and the result is not a normal
+ // execution result of the HELLO command, we consider it to be an indication that
+ // the server does not support the HELLO command.
+ // The server may be a redis-server that does not support the HELLO command,
+ // or it could be DragonflyDB or a third-party redis-proxy. They all respond
+ // with different error string results for unsupported commands, making it
+ // difficult to rely on error strings to determine all results.
+ return err
+ } else if password != "" {
+ // Try legacy AUTH command if HELLO failed
+ if username != "" {
+ err = conn.AuthACL(ctx, username, password).Err()
+ } else {
+ err = conn.Auth(ctx, password).Err()
+ }
+ if err != nil {
+ return fmt.Errorf("failed to authenticate: %w", err)
+ }
+ }
+
+ _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ if c.opt.ClientName != "" {
+ pipe.ClientSetName(ctx, c.opt.ClientName)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("failed to initialize connection options: %w", err)
+ }
+
+ if !c.opt.DisableIdentity && !c.opt.DisableIndentity {
+ libName := ""
+ libVer := Version()
+ if c.opt.IdentitySuffix != "" {
+ libName = c.opt.IdentitySuffix
+ }
+ p := conn.Pipeline()
+ p.ClientSetInfo(ctx, WithLibraryName(libName))
+ p.ClientSetInfo(ctx, WithLibraryVersion(libVer))
+ // Handle network errors (e.g. timeouts) in CLIENT SETINFO to avoid
+ // out of order responses later on.
+ if _, err = p.Exec(ctx); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ var fnErr error
+ defer func() {
+ c.releaseConn(ctx, cn, fnErr)
+ }()
+
+ fnErr = fn(ctx, cn)
+
+ return fnErr
+}
+
+func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.opt.Dialer(ctx, network, addr)
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) assertUnstableCommand(cmd Cmder) bool {
+ switch cmd.(type) {
+ case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd:
+ if c.opt.UnstableResp3 {
+ return true
+ } else {
+ panic("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3 . See the [README](https://github.com/redis/go-redis/blob/master/README.md) and the release notes for guidance.")
+ }
+ default:
+ return false
+ }
+}
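+
+// Sketch (based on the UnstableResp3 and Protocol options referenced above):
+// callers who accept that these RESP3 replies may still change can opt in when
+// constructing the client:
+//
+// rdb := redis.NewClient(&redis.Options{
+//     Addr:          "localhost:6379",
+//     Protocol:      3,
+//     UnstableResp3: true,
+// })
+// _ = rdb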
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(0)
+ if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ }); err != nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ return err
+ }
+ readReplyFunc := cmd.readReply
+ // Apply unstable RESP3 search module.
+ if c.opt.Protocol != 2 && c.assertUnstableCommand(cmd) {
+ readReplyFunc = cmd.readRawReply
+ }
+ if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), readReplyFunc); err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+ }
+
+ return false, nil
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// context returns the context for the current connection.
+// If the context timeout is enabled, it returns the original context.
+// Otherwise, it returns a new background context.
+func (c *baseClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ // Enable retries by default to retry dial errors returned by withConn.
+ canRetry := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ }); err != nil {
+ return true, err
+ }
+
+ return false, nil
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+ // Retry errors like "LOADING redis is loading the dataset in memory".
+ return cmds[0].Err()
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ }); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse +OK.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ // Parse +QUEUED.
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more underlying connections.
+// It's safe for concurrent use by multiple goroutines.
+//
+// Client creates and frees connections automatically; it also maintains a free pool
+// of idle connections. You can control the pool size with the Options.PoolSize option.
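+//
+// A minimal usage sketch (illustrative only; the address and key names are assumptions):
+//
+//	rdb := NewClient(&Options{Addr: "localhost:6379"})
+//	defer rdb.Close()
+//	if err := rdb.Ping(ctx).Err(); err != nil {
+//		// handle connection error
+//	}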
+type Client struct {
+ *baseClient
+ cmdable
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ if opt == nil {
+ panic("redis: NewClient nil options")
+ }
+ opt.init()
+
+ c := Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ c.init()
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return &c
+}
+
+func (c *Client) init() {
+ c.cmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := *c
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ clone.init()
+ return &clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin)
+}
+
+// Do creates a Cmd from the args and processes it.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
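+//
+// A minimal sketch of queueing commands transactionally (assumes an existing
+// *Client named rdb; the key name is illustrative):
+//
+//	cmds, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Incr(ctx, "counter")
+//		pipe.Expire(ctx, "counter", time.Hour)
+//		return nil
+//	})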
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(ctx, "mychannel")
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+// Channels can be omitted to create an empty subscription.
+func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
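+//
+// A minimal sketch (assumes an existing *Client named rdb; the connection name is illustrative):
+//
+//	conn := rdb.Conn()
+//	defer conn.Close()
+//	if err := conn.ClientSetName(ctx, "worker-1").Err(); err != nil {
+//		// handle error
+//	}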
+type Conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+}
+
+// newConn is a helper func to create a new Conn instance.
+// The Conn instance is not thread-safe and should not be shared between goroutines.
+// The parentHooks will be cloned internally, so there is no need to clone it before passing.
+func newConn(opt *Options, connPool pool.Pooler, parentHooks *hooksMixin) *Conn {
+ c := Conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ }
+
+ if parentHooks != nil {
+ c.hooksMixin = parentHooks.clone()
+ }
+
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipelineHook,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/vendor/github.com/redis/go-redis/v9/result.go b/vendor/github.com/redis/go-redis/v9/result.go
new file mode 100644
index 0000000..cfd4cf9
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/result.go
@@ -0,0 +1,188 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
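+//
+// A small sketch of stubbing a reply in a test (the value is an assumption):
+//
+//	cmd := NewCmdResult("PONG", nil)
+//	val, err := cmd.Result()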
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
+func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
+ var cmd MapStringStringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
+func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
+ var cmd MapStringIntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns an XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns an XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXPendingResult returns an XPendingCmd initialised with val and err for testing.
+func NewXPendingResult(val *XPending, err error) *XPendingCmd {
+ var cmd XPendingCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go
new file mode 100644
index 0000000..8a004b8
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/ring.go
@@ -0,0 +1,872 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous" //nolint
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided options.
+ NewClient func(opt *Options) *Client
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Frequency of PING commands sent to check shards availability.
+ // Shard is considered down after 3 subsequent failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+
+ // DisableIndentity - Disable set-lib on connect.
+ //
+ // default: false
+ //
+ // Deprecated: Use DisableIdentity instead.
+ DisableIndentity bool
+
+ // DisableIdentity is used to disable CLIENT SETINFO command on connect.
+ //
+ // default: false
+ DisableIdentity bool
+ IdentitySuffix string
+ UnstableResp3 bool
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
+ switch opt.MaxRetries {
+ case -1:
+ opt.MaxRetries = 0
+ case 0:
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+
+ DisableIdentity: opt.DisableIdentity,
+ DisableIndentity: opt.DisableIndentity,
+
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+ addr string
+}
+
+func newRingShard(opt *RingOptions, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(clopt),
+ addr: addr,
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringSharding struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ shards *ringShards
+ closed bool
+ hash ConsistentHash
+ numShard int
+ onNewNode []func(rdb *Client)
+
+ // ensures exclusive access to SetAddrs so there is no need
+ // to hold mu for the duration of potentially long shard creation
+ setAddrsMu sync.Mutex
+}
+
+type ringShards struct {
+ m map[string]*ringShard
+ list []*ringShard
+}
+
+func newRingSharding(opt *RingOptions) *ringSharding {
+ c := &ringSharding{
+ opt: opt,
+ }
+ c.SetAddrs(opt.Addrs)
+
+ return c
+}
+
+func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+// SetAddrs replaces the shards in use, so that you can increase or decrease
+// the number of shards. It reuses existing shards where possible and closes
+// the ones that are no longer needed.
+func (c *ringSharding) SetAddrs(addrs map[string]string) {
+ c.setAddrsMu.Lock()
+ defer c.setAddrsMu.Unlock()
+
+ cleanup := func(shards map[string]*ringShard) {
+ for addr, shard := range shards {
+ if err := shard.Client.Close(); err != nil {
+ internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+ }
+ }
+ }
+
+ c.mu.RLock()
+ if c.closed {
+ c.mu.RUnlock()
+ return
+ }
+ existing := c.shards
+ c.mu.RUnlock()
+
+ shards, created, unused := c.newRingShards(addrs, existing)
+
+ c.mu.Lock()
+ if c.closed {
+ cleanup(created)
+ c.mu.Unlock()
+ return
+ }
+ c.shards = shards
+ c.rebalanceLocked()
+ c.mu.Unlock()
+
+ cleanup(unused)
+}
+
+func (c *ringSharding) newRingShards(
+ addrs map[string]string, existing *ringShards,
+) (shards *ringShards, created, unused map[string]*ringShard) {
+ shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
+ created = make(map[string]*ringShard) // indexed by addr
+ unused = make(map[string]*ringShard) // indexed by addr
+
+ if existing != nil {
+ for _, shard := range existing.list {
+ unused[shard.addr] = shard
+ }
+ }
+
+ for name, addr := range addrs {
+ if shard, ok := unused[addr]; ok {
+ shards.m[name] = shard
+ delete(unused, addr)
+ } else {
+ shard := newRingShard(c.opt, addr)
+ shards.m[name] = shard
+ created[addr] = shard
+
+ for _, fn := range c.onNewNode {
+ fn(shard.Client)
+ }
+ }
+ }
+
+ for _, shard := range shards.m {
+ shards.list = append(shards.list, shard)
+ }
+
+ return
+}
+
+// Warning: exposing `c.shards.list` externally may cause data races,
+// so keep it internal or return a deep copy if it must be exposed.
+func (c *ringSharding) List() []*ringShard {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil
+ }
+ return c.shards.list
+}
+
+func (c *ringSharding) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+
+ return hash
+}
+
+func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ return nil, errRingShardsDown
+ }
+
+ shardName := c.hash.Get(key)
+ if shardName == "" {
+ return nil, errRingShardsDown
+ }
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ var rebalance bool
+
+ // note: `c.List()` returns a shallow copy of `[]*ringShard`.
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.mu.Lock()
+ c.rebalanceLocked()
+ c.mu.Unlock()
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// rebalanceLocked removes dead shards from the Ring.
+// Requires c.mu locked.
+func (c *ringSharding) rebalanceLocked() {
+ if c.closed {
+ return
+ }
+ if c.shards == nil {
+ return
+ }
+
+ liveShards := make([]string, 0, len(c.shards.m))
+
+ for name, shard := range c.shards.m {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ c.hash = c.opt.NewConsistentHash(liveShards)
+ c.numShard = len(liveShards)
+}
+
+func (c *ringSharding) Len() int {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.numShard
+}
+
+func (c *ringSharding) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+
+ for _, shard := range c.shards.list {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.hash = nil
+ c.shards = nil
+ c.numShard = 0
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
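+//
+// A minimal sketch of creating a ring (shard names and addresses are assumptions):
+//
+//	ring := NewRing(&RingOptions{
+//		Addrs: map[string]string{
+//			"shard1": "localhost:7000",
+//			"shard2": "localhost:7001",
+//		},
+//	})
+//	defer ring.Close()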
+type Ring struct {
+ cmdable
+ hooksMixin
+
+ opt *RingOptions
+ sharding *ringSharding
+ cmdsInfoCache *cmdsInfoCache
+ heartbeatCancelFn context.CancelFunc
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ if opt == nil {
+ panic("redis: NewRing nil options")
+ }
+ opt.init()
+
+ hbCtx, hbCancel := context.WithCancel(context.Background())
+
+ ring := Ring{
+ opt: opt,
+ sharding: newRingSharding(opt),
+ heartbeatCancelFn: hbCancel,
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ ring.initHooks(hooks{
+ process: ring.process,
+ pipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, false)
+ },
+ txPipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, true)
+ },
+ })
+
+ go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
+
+ return &ring
+}
+
+func (c *Ring) SetAddrs(addrs map[string]string) {
+ c.sharding.SetAddrs(addrs)
+}
+
+// Do creates a Cmd from the args and processes it.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ // note: `c.List()` returns a shallow copy of `[]*ringShard`.
+ shards := c.sharding.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.sharding.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.SSubscribe(ctx, channels...)
+}
+
+func (c *Ring) OnNewNode(fn func(rdb *Client)) {
+ c.sharding.OnNewNode(fn)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ // note: `c.List()` returns a shallow copy of `[]*ringShard`.
+ shards := c.sharding.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // note: `c.List()` returns a shallow copy of `[]*ringShard`.
+ shards := c.sharding.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
+ pos := cmdFirstKeyPos(cmd)
+ if pos == 0 {
+ return c.sharding.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.sharding.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ if tx {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+ }
+
+ cmdsMap := make(map[string][]Cmder)
+
+ for _, cmd := range cmds {
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd))
+ if hash != "" {
+ hash = c.sharding.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ // TODO: retry?
+ shard, err := c.sharding.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ if tx {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = shard.Client.processTxPipelineHook(ctx, cmds)
+ } else {
+ _ = shard.Client.processPipelineHook(ctx, cmds)
+ }
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
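+// Watch runs an optimistic-locking transaction on the single shard that owns
+// all of the given keys. A minimal sketch (the key name is an assumption):
+//
+//	err := ring.Watch(ctx, func(tx *Tx) error {
+//		n, err := tx.Get(ctx, "counter").Int()
+//		if err != nil && err != Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe Pipeliner) error {
+//			pipe.Set(ctx, "counter", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "counter")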
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.sharding.GetByKey(key)
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ c.heartbeatCancelFn()
+
+ return c.sharding.Close()
+}
+
+// GetShardClients returns a list of all shard clients in the ring.
+// This can be used to create dedicated connections (e.g., PubSub) for each shard.
+func (c *Ring) GetShardClients() []*Client {
+ shards := c.sharding.List()
+ clients := make([]*Client, 0, len(shards))
+ for _, shard := range shards {
+ if shard.IsUp() {
+ clients = append(clients, shard.Client)
+ }
+ }
+ return clients
+}
+
+// GetShardClientForKey returns the shard client that would handle the given key.
+// This can be used to determine which shard a particular key/channel would be routed to.
+func (c *Ring) GetShardClientForKey(key string) (*Client, error) {
+ shard, err := c.sharding.GetByKey(key)
+ if err != nil {
+ return nil, err
+ }
+ return shard.Client, nil
+}
diff --git a/vendor/github.com/redis/go-redis/v9/script.go b/vendor/github.com/redis/go-redis/v9/script.go
new file mode 100644
index 0000000..626ab03
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/script.go
@@ -0,0 +1,84 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+)
+
+type Scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalRO(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalShaRO(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not exist
+// it is retried using EVAL.
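+//
+// A minimal sketch (the script and key are illustrative; rdb is an assumed client):
+//
+//	var incrBy = NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
+//	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int()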
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
+
+// RunRO optimistically uses EVALSHA_RO to run the script. If the script does not exist
+// it is retried using EVAL_RO.
+func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalShaRO(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.EvalRO(ctx, c, keys, args...)
+ }
+ return r
+}
diff --git a/vendor/github.com/redis/go-redis/v9/scripting_commands.go b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
new file mode 100644
index 0000000..af9c339
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
@@ -0,0 +1,215 @@
+package redis
+
+import "context"
+
+type ScriptingFunctionsCmdable interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+}
+
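+// Eval evaluates a Lua script on the server. A minimal sketch (the script, key
+// and client variable are illustrative assumptions):
+//
+//	val, err := rdb.Eval(ctx, `return redis.call("GET", KEYS[1])`, []string{"greeting"}).Result()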
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval", script, keys, args...)
+}
+
+func (c cmdable) EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval_ro", script, keys, args...)
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha", sha1, keys, args...)
+}
+
+func (c cmdable) EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha_ro", sha1, keys, args...)
+}
+
+func (c cmdable) eval(ctx context.Context, name, payload string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = name
+ cmdArgs[1] = payload
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+
+ // It is possible that only args exist without any keys,
+ // e.g. rdb.Eval(ctx, script, nil, arg1, arg2).
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------
+
+// FunctionListQuery is used with FunctionList to query for Redis libraries.
+//
+// LibraryNamePattern:
+//   - Use an empty string to get all libraries.
+//   - Use a glob-style pattern to match multiple libraries with a matching name.
+//   - Use a library's full name to match a single library.
+//
+// WithCode - If true, the library's code is returned as well.
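+//
+// A minimal sketch (the library name pattern is an assumption):
+//
+//	libs, err := rdb.FunctionList(ctx, FunctionListQuery{
+//		LibraryNamePattern: "mylib*",
+//		WithCode:           true,
+//	}).Result()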
+type FunctionListQuery struct {
+ LibraryNamePattern string
+ WithCode bool
+}
+
+func (c cmdable) FunctionLoad(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionLoadReplace(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", "replace", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDelete(ctx context.Context, libName string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "delete", libName)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlush(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionKill(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlushAsync(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd {
+ args := make([]interface{}, 2, 5)
+ args[0] = "function"
+ args[1] = "list"
+ if q.LibraryNamePattern != "" {
+ args = append(args, "libraryname", q.LibraryNamePattern)
+ }
+ if q.WithCode {
+ args = append(args, "withcode")
+ }
+ cmd := NewFunctionListCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDump(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "dump")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionRestore(ctx context.Context, libDump string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "restore", libDump)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionStats(ctx context.Context) *FunctionStatsCmd {
+ cmd := NewFunctionStatsCmd(ctx, "function", "stats")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FCallRo simply calls FCallRO.
+//
+// Deprecated: Use FCallRO instead to keep the naming consistent with the Redis command.
+func (c cmdable) FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ return c.FCallRO(ctx, function, keys, args...)
+}
+
+func (c cmdable) FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall_ro", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func fcallArgs(command string, function string, keys []string, args ...interface{}) []interface{} {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = command
+ cmdArgs[1] = function
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+
+ cmdArgs = append(cmdArgs, args...)
+ return cmdArgs
+}
diff --git a/vendor/github.com/redis/go-redis/v9/search_commands.go b/vendor/github.com/redis/go-redis/v9/search_commands.go
new file mode 100644
index 0000000..b31baaa
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/search_commands.go
@@ -0,0 +1,2103 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type SearchCmdable interface {
+ FT_List(ctx context.Context) *StringSliceCmd
+ FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd
+ FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd
+ FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd
+ FTAliasDel(ctx context.Context, alias string) *StatusCmd
+ FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd
+ FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd
+ FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd
+ FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd
+ FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd
+ FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd
+ FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd
+ FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd
+ FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd
+ FTDictDump(ctx context.Context, dict string) *StringSliceCmd
+ FTDropIndex(ctx context.Context, index string) *StatusCmd
+ FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd
+ FTExplain(ctx context.Context, index string, query string) *StringCmd
+ FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd
+ FTInfo(ctx context.Context, index string) *FTInfoCmd
+ FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd
+ FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd
+ FTSearch(ctx context.Context, index string, query string) *FTSearchCmd
+ FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd
+ FTSynDump(ctx context.Context, index string) *FTSynDumpCmd
+ FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd
+ FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd
+ FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd
+}
+
+type FTCreateOptions struct {
+ OnHash bool
+ OnJSON bool
+ Prefix []interface{}
+ Filter string
+ DefaultLanguage string
+ LanguageField string
+ Score float64
+ ScoreField string
+ PayloadField string
+ MaxTextFields int
+ NoOffsets bool
+ Temporary int
+ NoHL bool
+ NoFields bool
+ NoFreqs bool
+ StopWords []interface{}
+ SkipInitialScan bool
+}
+
+type FieldSchema struct {
+ FieldName string
+ As string
+ FieldType SearchFieldType
+ Sortable bool
+ UNF bool
+ NoStem bool
+ NoIndex bool
+ PhoneticMatcher string
+ Weight float64
+ Separator string
+ CaseSensitive bool
+ WithSuffixtrie bool
+ VectorArgs *FTVectorArgs
+ GeoShapeFieldType string
+ IndexEmpty bool
+ IndexMissing bool
+}
+
+type FTVectorArgs struct {
+ FlatOptions *FTFlatOptions
+ HNSWOptions *FTHNSWOptions
+}
+
+type FTFlatOptions struct {
+ Type string
+ Dim int
+ DistanceMetric string
+ InitialCapacity int
+ BlockSize int
+}
+
+type FTHNSWOptions struct {
+ Type string
+ Dim int
+ DistanceMetric string
+ InitialCapacity int
+ MaxEdgesPerNode int
+ MaxAllowedEdgesPerNode int
+ EFRunTime int
+ Epsilon float64
+}
+
+type FTDropIndexOptions struct {
+ DeleteDocs bool
+}
+
+type SpellCheckTerms struct {
+ Include bool
+ Exclude bool
+ Dictionary string
+}
+
+type FTExplainOptions struct {
+ // Dialect 1,3 and 4 are deprecated since redis 8.0
+ Dialect string
+}
+
+type FTSynUpdateOptions struct {
+ SkipInitialScan bool
+}
+
+type SearchAggregator int
+
+const (
+ SearchInvalid = SearchAggregator(iota)
+ SearchAvg
+ SearchSum
+ SearchMin
+ SearchMax
+ SearchCount
+ SearchCountDistinct
+ SearchCountDistinctish
+ SearchStdDev
+ SearchQuantile
+ SearchToList
+ SearchFirstValue
+ SearchRandomSample
+)
+
+func (a SearchAggregator) String() string {
+ switch a {
+ case SearchInvalid:
+ return ""
+ case SearchAvg:
+ return "AVG"
+ case SearchSum:
+ return "SUM"
+ case SearchMin:
+ return "MIN"
+ case SearchMax:
+ return "MAX"
+ case SearchCount:
+ return "COUNT"
+ case SearchCountDistinct:
+ return "COUNT_DISTINCT"
+ case SearchCountDistinctish:
+ return "COUNT_DISTINCTISH"
+ case SearchStdDev:
+ return "STDDEV"
+ case SearchQuantile:
+ return "QUANTILE"
+ case SearchToList:
+ return "TOLIST"
+ case SearchFirstValue:
+ return "FIRST_VALUE"
+ case SearchRandomSample:
+ return "RANDOM_SAMPLE"
+ default:
+ return ""
+ }
+}
+
+type SearchFieldType int
+
+const (
+ SearchFieldTypeInvalid = SearchFieldType(iota)
+ SearchFieldTypeNumeric
+ SearchFieldTypeTag
+ SearchFieldTypeText
+ SearchFieldTypeGeo
+ SearchFieldTypeVector
+ SearchFieldTypeGeoShape
+)
+
+func (t SearchFieldType) String() string {
+ switch t {
+ case SearchFieldTypeInvalid:
+ return ""
+ case SearchFieldTypeNumeric:
+ return "NUMERIC"
+ case SearchFieldTypeTag:
+ return "TAG"
+ case SearchFieldTypeText:
+ return "TEXT"
+ case SearchFieldTypeGeo:
+ return "GEO"
+ case SearchFieldTypeVector:
+ return "VECTOR"
+ case SearchFieldTypeGeoShape:
+ return "GEOSHAPE"
+ default:
+ return "TEXT"
+ }
+}
+
+// Each AggregateReducer has different args.
+// See https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information.
+type FTAggregateReducer struct {
+ Reducer SearchAggregator
+ Args []interface{}
+ As string
+}
+
+type FTAggregateGroupBy struct {
+ Fields []interface{}
+ Reduce []FTAggregateReducer
+}
+
+type FTAggregateSortBy struct {
+ FieldName string
+ Asc bool
+ Desc bool
+}
+
+type FTAggregateApply struct {
+ Field string
+ As string
+}
+
+type FTAggregateLoad struct {
+ Field string
+ As string
+}
+
+type FTAggregateWithCursor struct {
+ Count int
+ MaxIdle int
+}
+
+type FTAggregateOptions struct {
+ Verbatim bool
+ LoadAll bool
+ Load []FTAggregateLoad
+ Timeout int
+ GroupBy []FTAggregateGroupBy
+ SortBy []FTAggregateSortBy
+ SortByMax int
+ // Scorer is used to set the scoring function. If not set, a default will be used.
+ // The default scorer depends on the Redis version:
+ // - `BM25` for Redis >= 8
+ // - `TFIDF` for Redis < 8
+ Scorer string
+ // AddScores is available in Redis CE 8
+ AddScores bool
+ Apply []FTAggregateApply
+ LimitOffset int
+ Limit int
+ Filter string
+ WithCursor bool
+ WithCursorOptions *FTAggregateWithCursor
+ Params map[string]interface{}
+ // Dialect 1,3 and 4 are deprecated since redis 8.0
+ DialectVersion int
+}
+
+type FTSearchFilter struct {
+ FieldName interface{}
+ Min interface{}
+ Max interface{}
+}
+
+type FTSearchGeoFilter struct {
+ FieldName string
+ Longitude float64
+ Latitude float64
+ Radius float64
+ Unit string
+}
+
+type FTSearchReturn struct {
+ FieldName string
+ As string
+}
+
+type FTSearchSortBy struct {
+ FieldName string
+ Asc bool
+ Desc bool
+}
+
+// FTSearchOptions holds options that can be passed to the FT.SEARCH command.
+// More information about the options can be found in the
+// FT.SEARCH documentation: https://redis.io/docs/latest/commands/ft.search/
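+//
+// A minimal sketch of a paginated, sorted search (index, query and field names
+// are assumptions):
+//
+//	res, err := rdb.FTSearchWithArgs(ctx, "idx:users", "@name:alice", &FTSearchOptions{
+//		SortBy:      []FTSearchSortBy{{FieldName: "age", Asc: true}},
+//		LimitOffset: 0,
+//		Limit:       10,
+//	}).Result()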
+type FTSearchOptions struct {
+ NoContent bool
+ Verbatim bool
+ NoStopWords bool
+ WithScores bool
+ WithPayloads bool
+ WithSortKeys bool
+ Filters []FTSearchFilter
+ GeoFilter []FTSearchGeoFilter
+ InKeys []interface{}
+ InFields []interface{}
+ Return []FTSearchReturn
+ Slop int
+ Timeout int
+ InOrder bool
+ Language string
+ Expander string
+ // Scorer is used to set the scoring function. If not set, a default will be used.
+ // The default scorer depends on the Redis version:
+ // - `BM25` for Redis >= 8
+ // - `TFIDF` for Redis < 8
+ Scorer string
+ ExplainScore bool
+ Payload string
+ SortBy []FTSearchSortBy
+ SortByWithCount bool
+ LimitOffset int
+ Limit int
+ // CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set.
+ // When using this option, the Limit and LimitOffset options are ignored.
+ CountOnly bool
+ Params map[string]interface{}
+ // Dialect 1,3 and 4 are deprecated since redis 8.0
+ DialectVersion int
+}
+
+type FTSynDumpResult struct {
+ Term string
+ Synonyms []string
+}
+
+type FTSynDumpCmd struct {
+ baseCmd
+ val []FTSynDumpResult
+}
+
+type FTAggregateResult struct {
+ Total int
+ Rows []AggregateRow
+}
+
+type AggregateRow struct {
+ Fields map[string]interface{}
+}
+
+type AggregateCmd struct {
+ baseCmd
+ val *FTAggregateResult
+}
+
+type FTInfoResult struct {
+ IndexErrors IndexErrors
+ Attributes []FTAttribute
+ BytesPerRecordAvg string
+ Cleaning int
+ CursorStats CursorStats
+ DialectStats map[string]int
+ DocTableSizeMB float64
+ FieldStatistics []FieldStatistic
+ GCStats GCStats
+ GeoshapesSzMB float64
+ HashIndexingFailures int
+ IndexDefinition IndexDefinition
+ IndexName string
+ IndexOptions []string
+ Indexing int
+ InvertedSzMB float64
+ KeyTableSizeMB float64
+ MaxDocID int
+ NumDocs int
+ NumRecords int
+ NumTerms int
+ NumberOfUses int
+ OffsetBitsPerRecordAvg string
+ OffsetVectorsSzMB float64
+ OffsetsPerTermAvg string
+ PercentIndexed float64
+ RecordsPerDocAvg string
+ SortableValuesSizeMB float64
+ TagOverheadSzMB float64
+ TextOverheadSzMB float64
+ TotalIndexMemorySzMB float64
+ TotalIndexingTime int
+ TotalInvertedIndexBlocks int
+ VectorIndexSzMB float64
+}
+
+type IndexErrors struct {
+ IndexingFailures int
+ LastIndexingError string
+ LastIndexingErrorKey string
+}
+
+type FTAttribute struct {
+ Identifier string
+ Attribute string
+ Type string
+ Weight float64
+ Sortable bool
+ NoStem bool
+ NoIndex bool
+ UNF bool
+ PhoneticMatcher string
+ CaseSensitive bool
+ WithSuffixtrie bool
+}
+
+type CursorStats struct {
+ GlobalIdle int
+ GlobalTotal int
+ IndexCapacity int
+ IndexTotal int
+}
+
+type FieldStatistic struct {
+ Identifier string
+ Attribute string
+ IndexErrors IndexErrors
+}
+
+type GCStats struct {
+ BytesCollected int
+ TotalMsRun int
+ TotalCycles int
+ AverageCycleTimeMs string
+ LastRunTimeMs int
+ GCNumericTreesMissed int
+ GCBlocksDenied int
+}
+
+type IndexDefinition struct {
+ KeyType string
+ Prefixes []string
+ DefaultScore float64
+}
+
+type FTSpellCheckOptions struct {
+ Distance int
+ Terms *FTSpellCheckTerms
+ // Dialects 1, 3 and 4 are deprecated since Redis 8.0.
+ Dialect int
+}
+
+type FTSpellCheckTerms struct {
+ Inclusion string // Either "INCLUDE" or "EXCLUDE"
+ Dictionary string
+ Terms []interface{}
+}
+
+type SpellCheckResult struct {
+ Term string
+ Suggestions []SpellCheckSuggestion
+}
+
+type SpellCheckSuggestion struct {
+ Score float64
+ Suggestion string
+}
+
+type FTSearchResult struct {
+ Total int
+ Docs []Document
+}
+
+type Document struct {
+ ID string
+ Score *float64
+ Payload *string
+ SortKey *string
+ Fields map[string]string
+}
+
+type AggregateQuery []interface{}
+
+// FT_List - Lists all the existing indexes in the database.
+// For more information, please refer to the Redis documentation:
+// [FT._LIST]: (https://redis.io/commands/ft._list/)
+func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT._LIST")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// For more information, please refer to the Redis documentation:
+// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/)
+func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd {
+ args := []interface{}{"FT.AGGREGATE", index, query}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery {
+ queryArgs := []interface{}{query}
+ if options != nil {
+ if options.Verbatim {
+ queryArgs = append(queryArgs, "VERBATIM")
+ }
+
+ if options.Scorer != "" {
+ queryArgs = append(queryArgs, "SCORER", options.Scorer)
+ }
+
+ if options.AddScores {
+ queryArgs = append(queryArgs, "ADDSCORES")
+ }
+
+ if options.LoadAll && options.Load != nil {
+ panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")
+ }
+ if options.LoadAll {
+ queryArgs = append(queryArgs, "LOAD", "*")
+ }
+ if options.Load != nil {
+ queryArgs = append(queryArgs, "LOAD", len(options.Load))
+ index, count := len(queryArgs)-1, 0
+ for _, load := range options.Load {
+ queryArgs = append(queryArgs, load.Field)
+ count++
+ if load.As != "" {
+ queryArgs = append(queryArgs, "AS", load.As)
+ count += 2
+ }
+ }
+ queryArgs[index] = count
+ }
+
+ if options.Timeout > 0 {
+ queryArgs = append(queryArgs, "TIMEOUT", options.Timeout)
+ }
+
+ for _, apply := range options.Apply {
+ queryArgs = append(queryArgs, "APPLY", apply.Field)
+ if apply.As != "" {
+ queryArgs = append(queryArgs, "AS", apply.As)
+ }
+ }
+
+ if options.GroupBy != nil {
+ for _, groupBy := range options.GroupBy {
+ queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields))
+ queryArgs = append(queryArgs, groupBy.Fields...)
+
+ for _, reducer := range groupBy.Reduce {
+ queryArgs = append(queryArgs, "REDUCE")
+ queryArgs = append(queryArgs, reducer.Reducer.String())
+ if reducer.Args != nil {
+ queryArgs = append(queryArgs, len(reducer.Args))
+ queryArgs = append(queryArgs, reducer.Args...)
+ } else {
+ queryArgs = append(queryArgs, 0)
+ }
+ if reducer.As != "" {
+ queryArgs = append(queryArgs, "AS", reducer.As)
+ }
+ }
+ }
+ }
+ if options.SortBy != nil {
+ queryArgs = append(queryArgs, "SORTBY")
+ sortByOptions := []interface{}{}
+ for _, sortBy := range options.SortBy {
+ sortByOptions = append(sortByOptions, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.AGGREGATE: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ sortByOptions = append(sortByOptions, "ASC")
+ }
+ if sortBy.Desc {
+ sortByOptions = append(sortByOptions, "DESC")
+ }
+ }
+ queryArgs = append(queryArgs, len(sortByOptions))
+ queryArgs = append(queryArgs, sortByOptions...)
+ }
+ if options.SortByMax > 0 {
+ queryArgs = append(queryArgs, "MAX", options.SortByMax)
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Filter != "" {
+ queryArgs = append(queryArgs, "FILTER", options.Filter)
+ }
+ if options.WithCursor {
+ queryArgs = append(queryArgs, "WITHCURSOR")
+ if options.WithCursorOptions != nil {
+ if options.WithCursorOptions.Count > 0 {
+ queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count)
+ }
+ if options.WithCursorOptions.MaxIdle > 0 {
+ queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle)
+ }
+ }
+ }
+ if options.Params != nil {
+ queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ queryArgs = append(queryArgs, key, value)
+ }
+ }
+
+ if options.DialectVersion > 0 {
+ queryArgs = append(queryArgs, "DIALECT", options.DialectVersion)
+ } else {
+ queryArgs = append(queryArgs, "DIALECT", 2)
+ }
+ }
+ return queryArgs
+}
+
+func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) {
+ if len(data) == 0 {
+ return nil, fmt.Errorf("no data returned")
+ }
+
+ total, ok := data[0].(int64)
+ if !ok {
+ return nil, fmt.Errorf("invalid total format")
+ }
+
+ rows := make([]AggregateRow, 0, len(data)-1)
+ for _, row := range data[1:] {
+ fields, ok := row.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("invalid row format")
+ }
+
+ rowMap := make(map[string]interface{})
+ for i := 0; i < len(fields); i += 2 {
+ key, ok := fields[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid field key format")
+ }
+ value := fields[i+1]
+ rowMap[key] = value
+ }
+ rows = append(rows, AggregateRow{Fields: rowMap})
+ }
+
+ result := &FTAggregateResult{
+ Total: int(total),
+ Rows: rows,
+ }
+ return result, nil
+}
+
+func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd {
+ return &AggregateCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) {
+ cmd.val = val
+}
+
+func (cmd *AggregateCmd) Val() *FTAggregateResult {
+ return cmd.val
+}
+
+func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *AggregateCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *AggregateCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *AggregateCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = ProcessAggregateResult(data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion.
+// For more information, please refer to the Redis documentation:
+// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/)
+func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd {
+ args := []interface{}{"FT.AGGREGATE", index, query}
+ if options != nil {
+ if options.Verbatim {
+ args = append(args, "VERBATIM")
+ }
+ if options.Scorer != "" {
+ args = append(args, "SCORER", options.Scorer)
+ }
+ if options.AddScores {
+ args = append(args, "ADDSCORES")
+ }
+ if options.LoadAll && options.Load != nil {
+ panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive")
+ }
+ if options.LoadAll {
+ args = append(args, "LOAD", "*")
+ }
+ if options.Load != nil {
+ args = append(args, "LOAD", len(options.Load))
+ index, count := len(args)-1, 0
+ for _, load := range options.Load {
+ args = append(args, load.Field)
+ count++
+ if load.As != "" {
+ args = append(args, "AS", load.As)
+ count += 2
+ }
+ }
+ args[index] = count
+ }
+ if options.Timeout > 0 {
+ args = append(args, "TIMEOUT", options.Timeout)
+ }
+ for _, apply := range options.Apply {
+ args = append(args, "APPLY", apply.Field)
+ if apply.As != "" {
+ args = append(args, "AS", apply.As)
+ }
+ }
+ if options.GroupBy != nil {
+ for _, groupBy := range options.GroupBy {
+ args = append(args, "GROUPBY", len(groupBy.Fields))
+ args = append(args, groupBy.Fields...)
+
+ for _, reducer := range groupBy.Reduce {
+ args = append(args, "REDUCE")
+ args = append(args, reducer.Reducer.String())
+ if reducer.Args != nil {
+ args = append(args, len(reducer.Args))
+ args = append(args, reducer.Args...)
+ } else {
+ args = append(args, 0)
+ }
+ if reducer.As != "" {
+ args = append(args, "AS", reducer.As)
+ }
+ }
+ }
+ }
+ if options.SortBy != nil {
+ args = append(args, "SORTBY")
+ sortByOptions := []interface{}{}
+ for _, sortBy := range options.SortBy {
+ sortByOptions = append(sortByOptions, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.AGGREGATE: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ sortByOptions = append(sortByOptions, "ASC")
+ }
+ if sortBy.Desc {
+ sortByOptions = append(sortByOptions, "DESC")
+ }
+ }
+ args = append(args, len(sortByOptions))
+ args = append(args, sortByOptions...)
+ }
+ if options.SortByMax > 0 {
+ args = append(args, "MAX", options.SortByMax)
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ args = append(args, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Filter != "" {
+ args = append(args, "FILTER", options.Filter)
+ }
+ if options.WithCursor {
+ args = append(args, "WITHCURSOR")
+ if options.WithCursorOptions != nil {
+ if options.WithCursorOptions.Count > 0 {
+ args = append(args, "COUNT", options.WithCursorOptions.Count)
+ }
+ if options.WithCursorOptions.MaxIdle > 0 {
+ args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle)
+ }
+ }
+ }
+ if options.Params != nil {
+ args = append(args, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ args = append(args, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ args = append(args, "DIALECT", options.DialectVersion)
+ } else {
+ args = append(args, "DIALECT", 2)
+ }
+ }
+
+ cmd := NewAggregateCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
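+
+// Illustrative sketch (hypothetical index and field names): aggregating with a sort
+// and a limit, assuming rdb is a *Client and that the sort-by element type defined
+// earlier in this file is FTAggregateSortBy.
+//
+//    opts := &FTAggregateOptions{
+//        SortBy:      []FTAggregateSortBy{{FieldName: "@price", Desc: true}},
+//        LimitOffset: 0,
+//        Limit:       5,
+//    }
+//    res, err := rdb.FTAggregateWithArgs(ctx, "idx:products", "*", opts).Result()
+//
+// res.Total reports the total, and res.Rows[i].Fields holds each row's key/value pairs.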
+
+// FTAliasAdd - Adds an alias to an index.
+// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/)
+func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd {
+ args := []interface{}{"FT.ALIASADD", alias, index}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAliasDel - Removes an alias from an index.
+// The 'alias' parameter specifies the alias to be removed.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/)
+func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAliasUpdate - Updates an alias to an index.
+// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias.
+// If the alias already exists for a different index, it updates the alias to point to the specified index instead.
+// For more information, please refer to the Redis documentation:
+// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/)
+func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTAlter - Alters the definition of an existing index.
+// The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan.
+// The 'definition' parameter specifies the new definition for the index.
+// For more information, please refer to the Redis documentation:
+// [FT.ALTER]: (https://redis.io/commands/ft.alter/)
+func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd {
+ args := []interface{}{"FT.ALTER", index}
+ if skipInitialScan {
+ args = append(args, "SKIPINITIALSCAN")
+ }
+ args = append(args, "SCHEMA", "ADD")
+ args = append(args, definition...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTConfigGet - Retrieves the value of a RediSearch configuration parameter.
+// The 'option' parameter specifies the configuration parameter to retrieve.
+// For more information, please refer to the Redis [FT.CONFIG GET] documentation.
+//
+// Deprecated: FTConfigGet is deprecated in Redis 8.
+// All configuration will be done with the CONFIG GET command.
+// For more information check [Client.ConfigGet] and [CONFIG GET Documentation]
+//
+// [CONFIG GET Documentation]: https://redis.io/commands/config-get/
+// [FT.CONFIG GET]: https://redis.io/commands/ft.config-get/
+func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd {
+ cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTConfigSet - Sets the value of a RediSearch configuration parameter.
+// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value.
+// For more information, please refer to the Redis [FT.CONFIG SET] documentation.
+//
+// Deprecated: FTConfigSet is deprecated in Redis 8.
+// All configuration will be done with the CONFIG SET command.
+// For more information check [Client.ConfigSet] and [CONFIG SET Documentation]
+//
+// [CONFIG SET Documentation]: https://redis.io/commands/config-set/
+// [FT.CONFIG SET]: https://redis.io/commands/ft.config-set/
+func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTCreate - Creates a new index with the given options and schema.
+// The 'index' parameter specifies the name of the index to create.
+// The 'options' parameter specifies various options for the index, such as:
+// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc.
+// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc.
+// For more information, please refer to the Redis documentation:
+// [FT.CREATE]: (https://redis.io/commands/ft.create/)
+func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd {
+ args := []interface{}{"FT.CREATE", index}
+ if options != nil {
+ if options.OnHash && !options.OnJSON {
+ args = append(args, "ON", "HASH")
+ }
+ if options.OnJSON && !options.OnHash {
+ args = append(args, "ON", "JSON")
+ }
+ if options.OnHash && options.OnJSON {
+ panic("FT.CREATE: ON HASH and ON JSON are mutually exclusive")
+ }
+ if options.Prefix != nil {
+ args = append(args, "PREFIX", len(options.Prefix))
+ args = append(args, options.Prefix...)
+ }
+ if options.Filter != "" {
+ args = append(args, "FILTER", options.Filter)
+ }
+ if options.DefaultLanguage != "" {
+ args = append(args, "LANGUAGE", options.DefaultLanguage)
+ }
+ if options.LanguageField != "" {
+ args = append(args, "LANGUAGE_FIELD", options.LanguageField)
+ }
+ if options.Score > 0 {
+ args = append(args, "SCORE", options.Score)
+ }
+ if options.ScoreField != "" {
+ args = append(args, "SCORE_FIELD", options.ScoreField)
+ }
+ if options.PayloadField != "" {
+ args = append(args, "PAYLOAD_FIELD", options.PayloadField)
+ }
+ if options.MaxTextFields > 0 {
+ args = append(args, "MAXTEXTFIELDS", options.MaxTextFields)
+ }
+ if options.NoOffsets {
+ args = append(args, "NOOFFSETS")
+ }
+ if options.Temporary > 0 {
+ args = append(args, "TEMPORARY", options.Temporary)
+ }
+ if options.NoHL {
+ args = append(args, "NOHL")
+ }
+ if options.NoFields {
+ args = append(args, "NOFIELDS")
+ }
+ if options.NoFreqs {
+ args = append(args, "NOFREQS")
+ }
+ if options.StopWords != nil {
+ args = append(args, "STOPWORDS", len(options.StopWords))
+ args = append(args, options.StopWords...)
+ }
+ if options.SkipInitialScan {
+ args = append(args, "SKIPINITIALSCAN")
+ }
+ }
+ if schema == nil {
+ panic("FT.CREATE: SCHEMA is required")
+ }
+ args = append(args, "SCHEMA")
+ for _, schema := range schema {
+ if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid {
+ panic("FT.CREATE: SCHEMA FieldName and FieldType are required")
+ }
+ args = append(args, schema.FieldName)
+ if schema.As != "" {
+ args = append(args, "AS", schema.As)
+ }
+ args = append(args, schema.FieldType.String())
+ if schema.VectorArgs != nil {
+ if schema.FieldType != SearchFieldTypeVector {
+ panic("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs")
+ }
+ if schema.VectorArgs.FlatOptions != nil && schema.VectorArgs.HNSWOptions != nil {
+ panic("FT.CREATE: SCHEMA VectorArgs FlatOptions and HNSWOptions are mutually exclusive")
+ }
+ if schema.VectorArgs.FlatOptions != nil {
+ args = append(args, "FLAT")
+ if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" {
+ panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT")
+ }
+ flatArgs := []interface{}{
+ "TYPE", schema.VectorArgs.FlatOptions.Type,
+ "DIM", schema.VectorArgs.FlatOptions.Dim,
+ "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric,
+ }
+ if schema.VectorArgs.FlatOptions.InitialCapacity > 0 {
+ flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity)
+ }
+ if schema.VectorArgs.FlatOptions.BlockSize > 0 {
+ flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize)
+ }
+ args = append(args, len(flatArgs))
+ args = append(args, flatArgs...)
+ }
+ if schema.VectorArgs.HNSWOptions != nil {
+ args = append(args, "HNSW")
+ if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" {
+ panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW")
+ }
+ hnswArgs := []interface{}{
+ "TYPE", schema.VectorArgs.HNSWOptions.Type,
+ "DIM", schema.VectorArgs.HNSWOptions.Dim,
+ "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric,
+ }
+ if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 {
+ hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity)
+ }
+ if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 {
+ hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode)
+ }
+ if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 {
+ hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode)
+ }
+ if schema.VectorArgs.HNSWOptions.EFRunTime > 0 {
+ hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime)
+ }
+ if schema.VectorArgs.HNSWOptions.Epsilon > 0 {
+ hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon)
+ }
+ args = append(args, len(hnswArgs))
+ args = append(args, hnswArgs...)
+ }
+ }
+ if schema.GeoShapeFieldType != "" {
+ if schema.FieldType != SearchFieldTypeGeoShape {
+ panic("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType")
+ }
+ args = append(args, schema.GeoShapeFieldType)
+ }
+ if schema.NoStem {
+ args = append(args, "NOSTEM")
+ }
+ if schema.Sortable {
+ args = append(args, "SORTABLE")
+ }
+ if schema.UNF {
+ args = append(args, "UNF")
+ }
+ if schema.NoIndex {
+ args = append(args, "NOINDEX")
+ }
+ if schema.PhoneticMatcher != "" {
+ args = append(args, "PHONETIC", schema.PhoneticMatcher)
+ }
+ if schema.Weight > 0 {
+ args = append(args, "WEIGHT", schema.Weight)
+ }
+ if schema.Separator != "" {
+ args = append(args, "SEPARATOR", schema.Separator)
+ }
+ if schema.CaseSensitive {
+ args = append(args, "CASESENSITIVE")
+ }
+ if schema.WithSuffixtrie {
+ args = append(args, "WITHSUFFIXTRIE")
+ }
+ if schema.IndexEmpty {
+ args = append(args, "INDEXEMPTY")
+ }
+ if schema.IndexMissing {
+ args = append(args, "INDEXMISSING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
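+
+// A rough sketch (hypothetical index name and fields): creating a hash-backed index
+// over keys prefixed with "user:". rdb is assumed to be a *Client, and the
+// SearchFieldType* constants are those defined earlier in this file.
+//
+//    err := rdb.FTCreate(ctx, "idx:users",
+//        &FTCreateOptions{OnHash: true, Prefix: []interface{}{"user:"}},
+//        &FieldSchema{FieldName: "name", FieldType: SearchFieldTypeText},
+//        &FieldSchema{FieldName: "age", FieldType: SearchFieldTypeNumeric, Sortable: true},
+//    ).Err()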
+
+// FTCursorDel - Deletes a cursor from an existing index.
+// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/)
+func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTCursorRead - Reads the next results from an existing cursor.
+// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read.
+// For more information, please refer to the Redis documentation:
+// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/)
+func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd {
+ args := []interface{}{"FT.CURSOR", "READ", index, cursorId}
+ if count > 0 {
+ args = append(args, "COUNT", count)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
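+
+// Sketch of the cursor flow, with hypothetical index name and cursorID: run
+// FT.AGGREGATE with WITHCURSOR, page through results with FT.CURSOR READ, and clean
+// up with FT.CURSOR DEL. Extracting cursorID from the WITHCURSOR reply is left out.
+//
+//    opts := &FTAggregateOptions{WithCursor: true, WithCursorOptions: &FTAggregateWithCursor{Count: 100}}
+//    _ = rdb.FTAggregateWithArgs(ctx, "idx:products", "*", opts)
+//    // ... obtain cursorID from the reply, then:
+//    page, err := rdb.FTCursorRead(ctx, "idx:products", cursorID, 100).Result()
+//    _ = rdb.FTCursorDel(ctx, "idx:products", cursorID).Err()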
+
+// FTDictAdd - Adds terms to a dictionary.
+// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/)
+func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd {
+ args := []interface{}{"FT.DICTADD", dict}
+ args = append(args, term...)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDictDel - Deletes terms from a dictionary.
+// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/)
+func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd {
+ args := []interface{}{"FT.DICTDEL", dict}
+ args = append(args, term...)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDictDump - Returns all terms in the specified dictionary.
+// The 'dict' parameter specifies the dictionary from which to return the terms.
+// For more information, please refer to the Redis documentation:
+// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/)
+func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDropIndex - Deletes an index.
+// The 'index' parameter specifies the index to delete.
+// For more information, please refer to the Redis documentation:
+// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/)
+func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd {
+ args := []interface{}{"FT.DROPINDEX", index}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTDropIndexWithArgs - Deletes an index with options.
+// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion.
+// For more information, please refer to the Redis documentation:
+// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/)
+func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd {
+ args := []interface{}{"FT.DROPINDEX", index}
+ if options != nil {
+ if options.DeleteDocs {
+ args = append(args, "DD")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTExplain - Returns the execution plan for a complex query.
+// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string.
+// For more information, please refer to the Redis documentation:
+// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/)
+func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd {
+ cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTExplainWithArgs - Returns the execution plan for a complex query with options.
+// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query.
+// For more information, please refer to the Redis documentation:
+// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/)
+func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd {
+ args := []interface{}{"FT.EXPLAIN", index, query}
+ if options.Dialect != "" {
+ args = append(args, "DIALECT", options.Dialect)
+ } else {
+ args = append(args, "DIALECT", 2)
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTExplainCli - Returns the execution plan for a complex query. [Not Implemented]
+// For more information, see https://redis.io/commands/ft.explaincli/
+func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error {
+ panic("not implemented")
+}
+
+func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) {
+ var ftInfo FTInfoResult
+ // Manually parse each field from the map
+ if indexErrors, ok := data["Index Errors"].([]interface{}); ok {
+ ftInfo.IndexErrors = IndexErrors{
+ IndexingFailures: internal.ToInteger(indexErrors[1]),
+ LastIndexingError: internal.ToString(indexErrors[3]),
+ LastIndexingErrorKey: internal.ToString(indexErrors[5]),
+ }
+ }
+
+ if attributes, ok := data["attributes"].([]interface{}); ok {
+ for _, attr := range attributes {
+ if attrMap, ok := attr.([]interface{}); ok {
+ att := FTAttribute{}
+ for i := 0; i < len(attrMap); i++ {
+ if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" {
+ att.Attribute = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" {
+ att.Identifier = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "type" {
+ att.Type = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "weight" {
+ att.Weight = internal.ToFloat(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" {
+ att.NoStem = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" {
+ att.Sortable = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" {
+ att.NoIndex = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "unf" {
+ att.UNF = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" {
+ att.PhoneticMatcher = internal.ToString(attrMap[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" {
+ att.CaseSensitive = true
+ continue
+ }
+ if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" {
+ att.WithSuffixtrie = true
+ continue
+ }
+
+ }
+ ftInfo.Attributes = append(ftInfo.Attributes, att)
+ }
+ }
+ }
+
+ ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"])
+ ftInfo.Cleaning = internal.ToInteger(data["cleaning"])
+
+ if cursorStats, ok := data["cursor_stats"].([]interface{}); ok {
+ ftInfo.CursorStats = CursorStats{
+ GlobalIdle: internal.ToInteger(cursorStats[1]),
+ GlobalTotal: internal.ToInteger(cursorStats[3]),
+ IndexCapacity: internal.ToInteger(cursorStats[5]),
+ IndexTotal: internal.ToInteger(cursorStats[7]),
+ }
+ }
+
+ if dialectStats, ok := data["dialect_stats"].([]interface{}); ok {
+ ftInfo.DialectStats = make(map[string]int)
+ for i := 0; i < len(dialectStats); i += 2 {
+ ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1])
+ }
+ }
+
+ ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"])
+
+ if fieldStats, ok := data["field statistics"].([]interface{}); ok {
+ for _, stat := range fieldStats {
+ if statMap, ok := stat.([]interface{}); ok {
+ ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{
+ Identifier: internal.ToString(statMap[1]),
+ Attribute: internal.ToString(statMap[3]),
+ IndexErrors: IndexErrors{
+ IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]),
+ LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]),
+ LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]),
+ },
+ })
+ }
+ }
+ }
+
+ if gcStats, ok := data["gc_stats"].([]interface{}); ok {
+ ftInfo.GCStats = GCStats{}
+ for i := 0; i < len(gcStats); i += 2 {
+ if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" {
+ ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" {
+ ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" {
+ ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" {
+ ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" {
+ ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" {
+ ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" {
+ ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1])
+ continue
+ }
+ }
+ }
+
+ ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"])
+ ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"])
+
+ if indexDef, ok := data["index_definition"].([]interface{}); ok {
+ ftInfo.IndexDefinition = IndexDefinition{
+ KeyType: internal.ToString(indexDef[1]),
+ Prefixes: internal.ToStringSlice(indexDef[3]),
+ DefaultScore: internal.ToFloat(indexDef[5]),
+ }
+ }
+
+ ftInfo.IndexName = internal.ToString(data["index_name"])
+ ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{}))
+ ftInfo.Indexing = internal.ToInteger(data["indexing"])
+ ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"])
+ ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"])
+ ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"])
+ ftInfo.NumDocs = internal.ToInteger(data["num_docs"])
+ ftInfo.NumRecords = internal.ToInteger(data["num_records"])
+ ftInfo.NumTerms = internal.ToInteger(data["num_terms"])
+ ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"])
+ ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"])
+ ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"])
+ ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"])
+ ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"])
+ ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"])
+ ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"])
+ ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"])
+ ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"])
+ ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"])
+ ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"])
+ ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"])
+ ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"])
+
+ return ftInfo, nil
+}
+
+type FTInfoCmd struct {
+ baseCmd
+ val FTInfoResult
+}
+
+func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd {
+ return &FTInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTInfoCmd) SetVal(val FTInfoResult) {
+ cmd.val = val
+}
+
+func (cmd *FTInfoCmd) Result() (FTInfoResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTInfoCmd) Val() FTInfoResult {
+ return cmd.val
+}
+
+func (cmd *FTInfoCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTInfoCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ data := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ data[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ data[k] = err
+ continue
+ }
+ return err
+ }
+ data[k] = v
+ }
+ cmd.val, err = parseFTInfo(data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// FTInfo - Retrieves information about an index.
+// The 'index' parameter specifies the index to retrieve information about.
+// For more information, please refer to the Redis documentation:
+// [FT.INFO]: (https://redis.io/commands/ft.info/)
+func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd {
+ cmd := newFTInfoCmd(ctx, "FT.INFO", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
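+
+// For illustration only (hypothetical index name, rdb assumed to be a *Client):
+//
+//    info, err := rdb.FTInfo(ctx, "idx:users").Result()
+//    if err == nil {
+//        fmt.Println(info.IndexName, info.NumDocs, info.PercentIndexed)
+//    }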
+
+// FTSpellCheck - Checks a query string for spelling errors.
+// For more details about spellcheck query please follow:
+// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/
+// For more information, please refer to the Redis documentation:
+// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/)
+func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd {
+ args := []interface{}{"FT.SPELLCHECK", index, query}
+ cmd := newFTSpellCheckCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options.
+// For more details about spellcheck query please follow:
+// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/
+// For more information, please refer to the Redis documentation:
+// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/)
+func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd {
+ args := []interface{}{"FT.SPELLCHECK", index, query}
+ if options != nil {
+ if options.Distance > 0 {
+ args = append(args, "DISTANCE", options.Distance)
+ }
+ if options.Terms != nil {
+ args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary)
+ args = append(args, options.Terms.Terms...)
+ }
+ if options.Dialect > 0 {
+ args = append(args, "DIALECT", options.Dialect)
+ } else {
+ args = append(args, "DIALECT", 2)
+ }
+ }
+ cmd := newFTSpellCheckCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
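+
+// A small sketch (hypothetical index name and query, rdb assumed to be a *Client):
+// spell-checking a query term with a custom Levenshtein distance.
+//
+//    res, err := rdb.FTSpellCheckWithArgs(ctx, "idx:products", "phnoe",
+//        &FTSpellCheckOptions{Distance: 2}).Result()
+//
+// Each SpellCheckResult pairs a misspelled term with its scored suggestions.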
+
+type FTSpellCheckCmd struct {
+ baseCmd
+ val []SpellCheckResult
+}
+
+func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd {
+ return &FTSpellCheckCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTSpellCheckCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult {
+ return cmd.val
+}
+
+func (cmd *FTSpellCheckCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = parseFTSpellCheck(data)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) {
+ results := make([]SpellCheckResult, 0, len(data))
+
+ for _, termData := range data {
+ termInfo, ok := termData.([]interface{})
+ if !ok || len(termInfo) != 3 {
+ return nil, fmt.Errorf("invalid term format")
+ }
+
+ term, ok := termInfo[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid term format")
+ }
+
+ suggestionsData, ok := termInfo[2].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestions format")
+ }
+
+ suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData))
+ for _, suggestionData := range suggestionsData {
+ suggestionInfo, ok := suggestionData.([]interface{})
+ if !ok || len(suggestionInfo) != 2 {
+ return nil, fmt.Errorf("invalid suggestion format")
+ }
+
+ scoreStr, ok := suggestionInfo[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestion score format")
+ }
+ score, err := strconv.ParseFloat(scoreStr, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid suggestion score value")
+ }
+
+ suggestion, ok := suggestionInfo[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("invalid suggestion format")
+ }
+
+ suggestions = append(suggestions, SpellCheckSuggestion{
+ Score: score,
+ Suggestion: suggestion,
+ })
+ }
+
+ results = append(results, SpellCheckResult{
+ Term: term,
+ Suggestions: suggestions,
+ })
+ }
+
+ return results, nil
+}
+
+func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) {
+ if len(data) < 1 {
+ return FTSearchResult{}, fmt.Errorf("unexpected search result format")
+ }
+
+ total, ok := data[0].(int64)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid total results format")
+ }
+
+ var results []Document
+ for i := 1; i < len(data); {
+ docID, ok := data[i].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid document ID format")
+ }
+
+ doc := Document{
+ ID: docID,
+ Fields: make(map[string]string),
+ }
+ i++
+
+ if noContent {
+ results = append(results, doc)
+ continue
+ }
+
+ if withScores && i < len(data) {
+ if scoreStr, ok := data[i].(string); ok {
+ score, err := strconv.ParseFloat(scoreStr, 64)
+ if err != nil {
+ return FTSearchResult{}, fmt.Errorf("invalid score format")
+ }
+ doc.Score = &score
+ i++
+ }
+ }
+
+ if withPayloads && i < len(data) {
+ if payload, ok := data[i].(string); ok {
+ doc.Payload = &payload
+ i++
+ }
+ }
+
+ if withSortKeys && i < len(data) {
+ if sortKey, ok := data[i].(string); ok {
+ doc.SortKey = &sortKey
+ i++
+ }
+ }
+
+ if i < len(data) {
+ fields, ok := data[i].([]interface{})
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid document fields format")
+ }
+
+ for j := 0; j < len(fields); j += 2 {
+ key, ok := fields[j].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid field key format")
+ }
+ value, ok := fields[j+1].(string)
+ if !ok {
+ return FTSearchResult{}, fmt.Errorf("invalid field value format")
+ }
+ doc.Fields[key] = value
+ }
+ i++
+ }
+
+ results = append(results, doc)
+ }
+ return FTSearchResult{
+ Total: int(total),
+ Docs: results,
+ }, nil
+}
+
+type FTSearchCmd struct {
+ baseCmd
+ val FTSearchResult
+ options *FTSearchOptions
+}
+
+func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd {
+ return &FTSearchCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ options: options,
+ }
+}
+
+func (cmd *FTSearchCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSearchCmd) SetVal(val FTSearchResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSearchCmd) Result() (FTSearchResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSearchCmd) Val() FTSearchResult {
+ return cmd.val
+}
+
+func (cmd *FTSearchCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSearchCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) {
+ data, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+ cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// FTSearch - Executes a search query on an index.
+// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd {
+ args := []interface{}{"FT.SEARCH", index, query}
+ cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type SearchQuery []interface{}
+
+// FTSearchQuery - Executes a search query on an index with additional options.
+// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query,
+// and the 'options' parameter specifies additional options for the search.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery {
+ queryArgs := []interface{}{query}
+ if options != nil {
+ if options.NoContent {
+ queryArgs = append(queryArgs, "NOCONTENT")
+ }
+ if options.Verbatim {
+ queryArgs = append(queryArgs, "VERBATIM")
+ }
+ if options.NoStopWords {
+ queryArgs = append(queryArgs, "NOSTOPWORDS")
+ }
+ if options.WithScores {
+ queryArgs = append(queryArgs, "WITHSCORES")
+ }
+ if options.WithPayloads {
+ queryArgs = append(queryArgs, "WITHPAYLOADS")
+ }
+ if options.WithSortKeys {
+ queryArgs = append(queryArgs, "WITHSORTKEYS")
+ }
+ if options.Filters != nil {
+ for _, filter := range options.Filters {
+ queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max)
+ }
+ }
+ if options.GeoFilter != nil {
+ for _, geoFilter := range options.GeoFilter {
+ queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit)
+ }
+ }
+ if options.InKeys != nil {
+ queryArgs = append(queryArgs, "INKEYS", len(options.InKeys))
+ queryArgs = append(queryArgs, options.InKeys...)
+ }
+ if options.InFields != nil {
+ queryArgs = append(queryArgs, "INFIELDS", len(options.InFields))
+ queryArgs = append(queryArgs, options.InFields...)
+ }
+ if options.Return != nil {
+ queryArgs = append(queryArgs, "RETURN")
+ queryArgsReturn := []interface{}{}
+ for _, ret := range options.Return {
+ queryArgsReturn = append(queryArgsReturn, ret.FieldName)
+ if ret.As != "" {
+ queryArgsReturn = append(queryArgsReturn, "AS", ret.As)
+ }
+ }
+ queryArgs = append(queryArgs, len(queryArgsReturn))
+ queryArgs = append(queryArgs, queryArgsReturn...)
+ }
+ if options.Slop > 0 {
+ queryArgs = append(queryArgs, "SLOP", options.Slop)
+ }
+ if options.Timeout > 0 {
+ queryArgs = append(queryArgs, "TIMEOUT", options.Timeout)
+ }
+ if options.InOrder {
+ queryArgs = append(queryArgs, "INORDER")
+ }
+ if options.Language != "" {
+ queryArgs = append(queryArgs, "LANGUAGE", options.Language)
+ }
+ if options.Expander != "" {
+ queryArgs = append(queryArgs, "EXPANDER", options.Expander)
+ }
+ if options.Scorer != "" {
+ queryArgs = append(queryArgs, "SCORER", options.Scorer)
+ }
+ if options.ExplainScore {
+ queryArgs = append(queryArgs, "EXPLAINSCORE")
+ }
+ if options.Payload != "" {
+ queryArgs = append(queryArgs, "PAYLOAD", options.Payload)
+ }
+ if options.SortBy != nil {
+ queryArgs = append(queryArgs, "SORTBY")
+ for _, sortBy := range options.SortBy {
+ queryArgs = append(queryArgs, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.SEARCH: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ queryArgs = append(queryArgs, "ASC")
+ }
+ if sortBy.Desc {
+ queryArgs = append(queryArgs, "DESC")
+ }
+ }
+ if options.SortByWithCount {
+ queryArgs = append(queryArgs, "WITHCOUNT")
+ }
+ }
+ if options.LimitOffset >= 0 && options.Limit > 0 {
+ queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ if options.Params != nil {
+ queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ queryArgs = append(queryArgs, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ queryArgs = append(queryArgs, "DIALECT", options.DialectVersion)
+ } else {
+ queryArgs = append(queryArgs, "DIALECT", 2)
+ }
+ }
+ return queryArgs
+}
+
+// FTSearchWithArgs - Executes a search query on an index with additional options.
+// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query,
+// and the 'options' parameter specifies additional options for the search.
+// For more information, please refer to the Redis documentation about [FT.SEARCH].
+//
+// [FT.SEARCH]: (https://redis.io/commands/ft.search/)
+func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd {
+ args := []interface{}{"FT.SEARCH", index, query}
+ if options != nil {
+ if options.NoContent {
+ args = append(args, "NOCONTENT")
+ }
+ if options.Verbatim {
+ args = append(args, "VERBATIM")
+ }
+ if options.NoStopWords {
+ args = append(args, "NOSTOPWORDS")
+ }
+ if options.WithScores {
+ args = append(args, "WITHSCORES")
+ }
+ if options.WithPayloads {
+ args = append(args, "WITHPAYLOADS")
+ }
+ if options.WithSortKeys {
+ args = append(args, "WITHSORTKEYS")
+ }
+ if options.Filters != nil {
+ for _, filter := range options.Filters {
+ args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max)
+ }
+ }
+ if options.GeoFilter != nil {
+ for _, geoFilter := range options.GeoFilter {
+ args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit)
+ }
+ }
+ if options.InKeys != nil {
+ args = append(args, "INKEYS", len(options.InKeys))
+ args = append(args, options.InKeys...)
+ }
+ if options.InFields != nil {
+ args = append(args, "INFIELDS", len(options.InFields))
+ args = append(args, options.InFields...)
+ }
+ if options.Return != nil {
+ args = append(args, "RETURN")
+ argsReturn := []interface{}{}
+ for _, ret := range options.Return {
+ argsReturn = append(argsReturn, ret.FieldName)
+ if ret.As != "" {
+ argsReturn = append(argsReturn, "AS", ret.As)
+ }
+ }
+ args = append(args, len(argsReturn))
+ args = append(args, argsReturn...)
+ }
+ if options.Slop > 0 {
+ args = append(args, "SLOP", options.Slop)
+ }
+ if options.Timeout > 0 {
+ args = append(args, "TIMEOUT", options.Timeout)
+ }
+ if options.InOrder {
+ args = append(args, "INORDER")
+ }
+ if options.Language != "" {
+ args = append(args, "LANGUAGE", options.Language)
+ }
+ if options.Expander != "" {
+ args = append(args, "EXPANDER", options.Expander)
+ }
+ if options.Scorer != "" {
+ args = append(args, "SCORER", options.Scorer)
+ }
+ if options.ExplainScore {
+ args = append(args, "EXPLAINSCORE")
+ }
+ if options.Payload != "" {
+ args = append(args, "PAYLOAD", options.Payload)
+ }
+ if options.SortBy != nil {
+ args = append(args, "SORTBY")
+ for _, sortBy := range options.SortBy {
+ args = append(args, sortBy.FieldName)
+ if sortBy.Asc && sortBy.Desc {
+ panic("FT.SEARCH: ASC and DESC are mutually exclusive")
+ }
+ if sortBy.Asc {
+ args = append(args, "ASC")
+ }
+ if sortBy.Desc {
+ args = append(args, "DESC")
+ }
+ }
+ if options.SortByWithCount {
+ args = append(args, "WITHCOUNT")
+ }
+ }
+ if options.CountOnly {
+ args = append(args, "LIMIT", 0, 0)
+ } else {
+ if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 {
+ args = append(args, "LIMIT", options.LimitOffset, options.Limit)
+ }
+ }
+ if options.Params != nil {
+ args = append(args, "PARAMS", len(options.Params)*2)
+ for key, value := range options.Params {
+ args = append(args, key, value)
+ }
+ }
+ if options.DialectVersion > 0 {
+ args = append(args, "DIALECT", options.DialectVersion)
+ } else {
+ args = append(args, "DIALECT", 2)
+ }
+ }
+ cmd := newFTSearchCmd(ctx, options, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
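+
+// Sketch of a parameterized search (hypothetical index and parameter names, rdb
+// assumed to be a *Client). Parameterized queries require dialect 2 or higher.
+//
+//    res, err := rdb.FTSearchWithArgs(ctx, "idx:products", "@price:[$min $max]",
+//        &FTSearchOptions{
+//            Params:         map[string]interface{}{"min": 10, "max": 100},
+//            DialectVersion: 2,
+//        }).Result()
+//
+// res.Total is the match count; res.Docs carries each document's ID and fields.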
+
+func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd {
+ return &FTSynDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FTSynDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) {
+ cmd.val = val
+}
+
+func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult {
+ return cmd.val
+}
+
+func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FTSynDumpCmd) RawVal() interface{} {
+ return cmd.rawVal
+}
+
+func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) {
+ return cmd.rawVal, cmd.err
+}
+
+func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error {
+ termSynonymPairs, err := rd.ReadSlice()
+ if err != nil {
+ return err
+ }
+
+ var results []FTSynDumpResult
+ for i := 0; i < len(termSynonymPairs); i += 2 {
+ term, ok := termSynonymPairs[i].(string)
+ if !ok {
+ return fmt.Errorf("invalid term format")
+ }
+
+ synonyms, ok := termSynonymPairs[i+1].([]interface{})
+ if !ok {
+ return fmt.Errorf("invalid synonyms format")
+ }
+
+ synonymList := make([]string, len(synonyms))
+ for j, syn := range synonyms {
+ synonym, ok := syn.(string)
+ if !ok {
+ return fmt.Errorf("invalid synonym format")
+ }
+ synonymList[j] = synonym
+ }
+
+ results = append(results, FTSynDumpResult{
+ Term: term,
+ Synonyms: synonymList,
+ })
+ }
+
+ cmd.val = results
+ return nil
+}
+
+// FTSynDump - Dumps the contents of a synonym group.
+// The 'index' parameter specifies the index to dump.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/)
+func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd {
+ cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSynUpdate - Creates or updates a synonym group with additional terms.
+// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/)
+func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd {
+ args := []interface{}{"FT.SYNUPDATE", index, synGroupId}
+ args = append(args, terms...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options.
+// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms.
+// For more information, please refer to the Redis documentation:
+// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/)
+func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd {
+ args := []interface{}{"FT.SYNUPDATE", index, synGroupId}
+ if options.SkipInitialScan {
+ args = append(args, "SKIPINITIALSCAN")
+ }
+ args = append(args, terms...)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FTTagVals - Returns all distinct values indexed in a tag field.
+// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from.
+// For more information, please refer to the Redis documentation:
+// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/)
+func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go
new file mode 100644
index 0000000..43fbcd2
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sentinel.go
@@ -0,0 +1,1048 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // If specified with SentinelPassword, enables ACL-based authentication (via
+ // AUTH <user> <pass>).
+ SentinelUsername string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+ // configuration, or, if SentinelUsername is also supplied, used for ACL-based
+ // authentication.
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to replica read-only nodes.
+ ReplicaOnly bool
+
+ // Use replicas that are disconnected from the master when no connected replicas are available.
+ // Currently, this option only works in the RandomReplicaAddr function.
+ UseDisconnectedReplicas bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ // DisableIndentity - Disable set-lib on connect.
+ //
+ // default: false
+ //
+ // Deprecated: Use DisableIdentity instead.
+ DisableIndentity bool
+
+ // DisableIdentity is used to disable CLIENT SETINFO command on connect.
+ //
+ // default: false
+ DisableIdentity bool
+
+ IdentitySuffix string
+ UnstableResp3 bool
+}
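+
+// An illustrative sketch (hypothetical sentinel addresses and master name): these
+// options are typically passed to NewFailoverClient, defined later in this file.
+//
+//    rdb := NewFailoverClient(&FailoverOptions{
+//        MasterName:    "mymaster",
+//        SentinelAddrs: []string{"sentinel1:26379", "sentinel2:26379", "sentinel3:26379"},
+//    })
+//    _ = rdb.Ping(context.Background()).Err()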
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIdentity: opt.DisableIdentity,
+ DisableIndentity: opt.DisableIndentity,
+
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Username: opt.SentinelUsername,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIdentity: opt.DisableIdentity,
+ DisableIndentity: opt.DisableIndentity,
+
+ IdentitySuffix: opt.IdentitySuffix,
+ UnstableResp3: opt.UnstableResp3,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIdentity: opt.DisableIdentity,
+ DisableIndentity: opt.DisableIndentity,
+
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+// ParseFailoverURL parses a URL into FailoverOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>/<db_number>
+// or
+// rediss://<user>:<password>@<host>:<port>/<db_number>
+//
+// To add additional addresses, specify the query parameter "addr" one or more times, e.g.:
+//
+// redis://<user>:<password>@<host>:<port>/<db_number>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>/<db_number>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "sentinel_username" and "sentinel_password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?master_name=mymaster&dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &FailoverOptions{
+// MasterName: "mymaster",
+// SentinelAddrs: []string{"localhost:6789", "localhost:6790", "localhost:6791"},
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseFailoverURL(redisURL string) (*FailoverOptions, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+ return setupFailoverConn(u)
+}
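+
+// A minimal usage sketch for ParseFailoverURL (the URL, master name, and
+// timeout values below are illustrative assumptions, not values taken from
+// this repository):
+//
+//	opts, err := redis.ParseFailoverURL("redis://user:pass@localhost:26379?master_name=mymaster&dial_timeout=3")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := redis.NewFailoverClient(opts)
+//	_ = rdb.Ping(context.Background()).Err()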
+
+func setupFailoverConn(u *url.URL) (*FailoverOptions, error) {
+ o := &FailoverOptions{}
+
+ o.SentinelUsername, o.SentinelPassword = getUserPassword(u)
+
+ h, p := getHostPortWithDefaults(u)
+ o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p))
+
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: h, MinVersion: tls.VersionTLS12}
+ case "redis":
+ o.TLSConfig = nil
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ var err error
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ return setupFailoverConnParams(u, o)
+}
+
+func setupFailoverConnParams(u *url.URL, o *FailoverOptions) (*FailoverOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.MasterName = q.string("master_name")
+ o.ClientName = q.string("client_name")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.ReplicaOnly = q.bool("replica_only")
+ o.UseDisconnectedReplicas = q.bool("use_disconnected_replicas")
+ o.Protocol = q.int("protocol")
+ o.Username = q.string("username")
+ o.Password = q.string("password")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.ContextTimeoutEnabled = q.bool("context_timeout_enabled")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.DisableIdentity = q.bool("disableIdentity")
+ o.IdentitySuffix = q.string("identitySuffix")
+ o.UnstableResp3 = q.bool("unstable_resp3")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt == nil {
+ panic("redis: NewFailoverClient nil options")
+ }
+
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+ sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+ })
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterReplicaDialer(failover)
+ opt.init()
+
+ var connPool *pool.ConnPool
+
+ rdb := &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ rdb.init()
+
+ connPool = newConnPool(opt, rdb.dialHook)
+ rdb.connPool = connPool
+ rdb.onClose = rdb.wrappedOnClose(failover.Close)
+
+ failover.mu.Lock()
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+ failover.mu.Unlock()
+
+ return rdb
+}
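+
+// A hedged example of constructing a failover client directly (the sentinel
+// addresses, master name, and key/value are placeholders for illustration):
+//
+//	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+//		MasterName:    "mymaster",
+//		SentinelAddrs: []string{"127.0.0.1:26379", "127.0.0.1:26380", "127.0.0.1:26381"},
+//	})
+//	if err := rdb.Set(context.Background(), "key", "value", 0).Err(); err != nil {
+//		// handle error
+//	}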
+
+func masterReplicaDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.ReplicaOnly {
+ addr, err = failover.RandomReplicaAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+
+ netDialer := &net.Dialer{
+ Timeout: failover.opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if failover.opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ if opt == nil {
+ panic("redis: NewSentinelClient nil options")
+ }
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ })
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return c
+}
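+
+// A small sketch of querying a Sentinel node directly (the address and master
+// name are assumptions for illustration):
+//
+//	sc := redis.NewSentinelClient(&redis.Options{Addr: "127.0.0.1:26379"})
+//	addr, err := sc.GetMasterAddrByName(context.Background(), "mymaster").Result()
+//	// on success, addr is []string{host, port} of the current master
+//	_, _ = addr, err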
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement from other Sentinels.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every replica and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Replicas shows a list of replicas for the specified master and their state.
+func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set is used in order to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove is used in order to remove the specified master: the master will no
+// longer be monitored, and will totally be removed from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
+ if c.opt == nil {
+ return "", errors.New("opt is nil")
+ }
+
+ addresses, err := c.replicaAddrs(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
+ addresses, err = c.replicaAddrs(ctx, true)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ var (
+ masterAddr string
+ wg sync.WaitGroup
+ once sync.Once
+ errCh = make(chan error, len(c.sentinelAddrs))
+ )
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ wg.Add(1)
+ go func(i int, addr string) {
+ defer wg.Done()
+ sentinelCli := NewSentinelClient(c.opt.sentinelOptions(addr))
+ addrVal, err := sentinelCli.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName addr=%s, master=%q failed: %s",
+ addr, c.opt.MasterName, err)
+ _ = sentinelCli.Close()
+ errCh <- err
+ return
+ }
+ once.Do(func() {
+ masterAddr = net.JoinHostPort(addrVal[0], addrVal[1])
+ // Push working sentinel to the top
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinelCli)
+ internal.Logger.Printf(ctx, "sentinel: selected addr=%s masterAddr=%s", addr, masterAddr)
+ cancel()
+ })
+ }(i, sentinelAddr)
+ }
+
+ wg.Wait()
+ close(errCh)
+ if masterAddr != "" {
+ return masterAddr, nil
+ }
+ errs := make([]error, 0, len(errCh))
+ for err := range errCh {
+ errs = append(errs, err)
+ }
+ return "", fmt.Errorf("redis: all sentinels specified in configuration are unreachable: %w", errors.Join(errs...))
+}
+
+func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ } else {
+ // No error and no replicas.
+ _ = c.closeSentinel()
+ }
+ }
+
+ var sentinelReachable bool
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
+ c.opt.MasterName, err)
+ continue
+ }
+ sentinelReachable = true
+ addrs := parseReplicaAddrs(replicas, useDisconnected)
+ if len(addrs) == 0 {
+ continue
+ }
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ return addrs, nil
+ }
+
+ if sentinelReachable {
+ return []string{}, nil
+ }
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ return "", err
+ }
+ return net.JoinHostPort(addr[0], addr[1]), nil
+}
+
+func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
+ addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ return nil, err
+ }
+ return parseReplicaAddrs(addrs, false), nil
+}
+
+func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
+ nodes := make([]string, 0, len(addrs))
+ for _, node := range addrs {
+ isDown := false
+ if flags, ok := node["flags"]; ok {
+ for _, flag := range strings.Split(flags, ",") {
+ switch flag {
+ case "s_down", "o_down":
+ isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
+ }
+ }
+ }
+ if !isDown && node["ip"] != "" && node["port"] != "" {
+ nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr //nolint:ifshort
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ ip, ok := sentinel["ip"]
+ if !ok {
+ continue
+ }
+ port, ok := sentinel["port"]
+ if !ok {
+ continue
+ }
+ if ip != "" && port != "" {
+ sentinelAddr := net.JoinHostPort(ip, port)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a replica node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ if failoverOpt == nil {
+ panic("redis: NewFailoverClusterClient nil options")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ if failoverOpt.DB != 0 {
+ onConnect := opt.OnConnect
+
+ opt.OnConnect = func(ctx context.Context, cn *Conn) error {
+ if err := cn.Select(ctx, failoverOpt.DB).Err(); err != nil {
+ return err
+ }
+
+ if onConnect != nil {
+ return onConnect(ctx, cn)
+ }
+
+ return nil
+ }
+ }
+
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ replicaAddrs, err := failover.replicaAddrs(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, replicaAddr := range replicaAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: replicaAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+
+ failover.mu.Lock()
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+ failover.mu.Unlock()
+
+ return c
+}
diff --git a/vendor/github.com/redis/go-redis/v9/set_commands.go b/vendor/github.com/redis/go-redis/v9/set_commands.go
new file mode 100644
index 0000000..cef8ad6
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/set_commands.go
@@ -0,0 +1,217 @@
+package redis
+
+import "context"
+
+type SetCmdable interface {
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
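+
+// A sketch of iterating a set with SSCAN, assuming an existing client rdb and
+// context ctx (the key and match pattern are illustrative); cursor handling
+// follows the standard SCAN contract:
+//
+//	var cursor uint64
+//	for {
+//		members, next, err := rdb.SScan(ctx, "myset", cursor, "user:*", 100).Result()
+//		if err != nil {
+//			break // handle error
+//		}
+//		_ = members // process this page of members
+//		if next == 0 {
+//			break
+//		}
+//		cursor = next
+//	}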
diff --git a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
new file mode 100644
index 0000000..6701402
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
@@ -0,0 +1,772 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type SortedSetCmdable interface {
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0, "max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
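+
+// A brief sketch of ZAddArgs usage, assuming an existing client rdb and
+// context ctx (the key, member, and score are illustrative); GT together with
+// Ch only updates members whose new score is greater and reports changed
+// elements:
+//
+//	changed, err := rdb.ZAddArgs(ctx, "leaderboard", redis.ZAddArgs{
+//		GT:      true,
+//		Ch:      true,
+//		Members: []redis.Z{{Score: 42, Member: "alice"}},
+//	}).Result()
+//	_, _ = changed, err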
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
+}
+
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ XX: true,
+ Members: members,
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "zintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZMPop Pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names.
+// direction: "max" (highest score) or "min" (lowest score), count: > 0
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// In Redis >= 6.2.0, ZRANGE with these arguments can replace the following commands:
+//
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+ Key string
+
+ // When the ByScore option is provided, the open interval (exclusive) can be set.
+ // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+ // It is similar to the deprecated (6.2.0+) ZRangeByScore command.
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+ // For the ByLex option, it is similar to the deprecated (6.2.0+) ZRangeByLex command.
+ // You can set the <Start> and <Stop> options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+ // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+ // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRankWithScore according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRevRangeWithScores according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member, including the name of the key it was popped from.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
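+
+// A short sketch of ZStore with ZUnionStore, assuming an existing client rdb
+// and context ctx (the keys, weights, and destination are illustrative
+// placeholders):
+//
+//	n, err := rdb.ZUnionStore(ctx, "combined", &redis.ZStore{
+//		Keys:      []string{"zset:a", "zset:b"},
+//		Weights:   []float64{1, 2},
+//		Aggregate: "MAX",
+//	}).Result()
+//	_, _ = n, err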
diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go
new file mode 100644
index 0000000..6d7b229
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go
@@ -0,0 +1,450 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StreamCmdable interface {
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+}
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that the map form does not preserve the order of key-value pairs.
+// MaxLen and MinID are mutually exclusive; only one of them can be used.
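+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb):
+//
+//	id, err := rdb.XAdd(ctx, &redis.XAddArgs{
+//		Stream: "events",
+//		MaxLen: 1000,
+//		Approx: true,
+//		Values: map[string]interface{}{"type": "click", "user": "42"},
+//	}).Result()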
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ ID string
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 2*len(a.Streams)+6)
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+ if a.ID != "" {
+ for range a.Streams {
+ args = append(args, a.ID)
+ }
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim trims the stream. If approx is true, the "~" matcher is added; otherwise the
+// default exact trimming ("=", the Redis default) is used.
+// Examples:
+//
+// XTRIM key MAXLEN/MINID threshold LIMIT limit.
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// If the redis-server version is lower than 6.2, limit must be set to 0.
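+//
+// Illustrative usage sketch via the exported wrappers below (not part of the upstream
+// go-redis source; assumes a *redis.Client named rdb):
+//
+//	// XTRIM events MAXLEN ~ 1000 LIMIT 100
+//	trimmed, err := rdb.XTrimMaxLenApprox(ctx, "events", 1000, 100).Result()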
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XTrimMaxLen trims the stream to an exact maxLen (no `~` matcher); `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/string_commands.go b/vendor/github.com/redis/go-redis/v9/string_commands.go
new file mode 100644
index 0000000..eff5880
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/string_commands.go
@@ -0,0 +1,303 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StringCmdable interface {
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct); for struct types, see the HSet description.
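+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb):
+//
+//	err := rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()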
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct); for struct types, see the HSet description.
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEx`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
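+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb):
+//
+//	// SET session:42 "payload" EX 600
+//	err := rdb.Set(ctx, "session:42", "payload", 10*time.Minute).Err()
+//
+//	// SET session:42 "payload" KEEPTTL (requires redis-server >= 6.0)
+//	err = rdb.Set(ctx, "session:42", "payload", redis.KeepTTL).Err()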
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `ExpireAt` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+ // otherwise you will receive the error: (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
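+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb):
+//
+//	// SET key value EX 60 NX GET
+//	old, err := rdb.SetArgs(ctx, "key", "value", redis.SetArgs{
+//		Mode: "NX",
+//		TTL:  time.Minute,
+//		Get:  true,
+//	}).Result()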
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetEx Redis `SETEx key expiration value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
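+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb): acquiring a simple lock that expires after 30 seconds.
+//
+//	ok, err := rdb.SetNX(ctx, "lock:job", "owner-1", 30*time.Second).Result()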
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
new file mode 100644
index 0000000..82d8cdf
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
@@ -0,0 +1,950 @@
+package redis
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type TimeseriesCmdable interface {
+ TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd
+ TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd
+ TSCreate(ctx context.Context, key string) *StatusCmd
+ TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd
+ TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd
+ TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd
+ TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd
+ TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd
+ TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd
+ TSGet(ctx context.Context, key string) *TSTimestampValueCmd
+ TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd
+ TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+ TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd
+ TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd
+ TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd
+ TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd
+ TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd
+ TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd
+ TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd
+ TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd
+ TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd
+}
+
+type TSOptions struct {
+ Retention int
+ ChunkSize int
+ Encoding string
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+type TSIncrDecrOptions struct {
+ Timestamp int64
+ Retention int
+ ChunkSize int
+ Uncompressed bool
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+
+type TSAlterOptions struct {
+ Retention int
+ ChunkSize int
+ DuplicatePolicy string
+ Labels map[string]string
+ IgnoreMaxTimeDiff int64
+ IgnoreMaxValDiff float64
+}
+
+type TSCreateRuleOptions struct {
+ alignTimestamp int64
+}
+
+type TSGetOptions struct {
+ Latest bool
+}
+
+type TSInfoOptions struct {
+ Debug bool
+}
+type Aggregator int
+
+const (
+ Invalid = Aggregator(iota)
+ Avg
+ Sum
+ Min
+ Max
+ Range
+ Count
+ First
+ Last
+ StdP
+ StdS
+ VarP
+ VarS
+ Twa
+)
+
+func (a Aggregator) String() string {
+ switch a {
+ case Invalid:
+ return ""
+ case Avg:
+ return "AVG"
+ case Sum:
+ return "SUM"
+ case Min:
+ return "MIN"
+ case Max:
+ return "MAX"
+ case Range:
+ return "RANGE"
+ case Count:
+ return "COUNT"
+ case First:
+ return "FIRST"
+ case Last:
+ return "LAST"
+ case StdP:
+ return "STD.P"
+ case StdS:
+ return "STD.S"
+ case VarP:
+ return "VAR.P"
+ case VarS:
+ return "VAR.S"
+ case Twa:
+ return "TWA"
+ default:
+ return ""
+ }
+}
+
+type TSRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSMRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMGetOptions struct {
+ Latest bool
+ WithLabels bool
+ SelectedLabels []interface{}
+}
+
+// TSAdd - Adds a sample to a time series.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAddWithArgs - Adds a sample to a time series with additional options.
+// This function also allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreate - Creates a new time-series key.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateWithArgs - Creates a new time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.create/
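+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb connected to a server with the RedisTimeSeries module):
+//
+//	err := rdb.TSCreateWithArgs(ctx, "temp:room1", &redis.TSOptions{
+//		Retention: 86400000, // retention period in milliseconds
+//		Labels:    map[string]string{"sensor": "temp", "room": "1"},
+//	}).Err()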
+func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAlter - Alters an existing time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize and DuplicatePolicy.
+// For more information - https://redis.io/commands/ts.alter/
+func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd {
+ args := []interface{}{"TS.ALTER", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRule - Creates a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRuleWithArgs - Creates a compaction rule from sourceKey to destKey with an additional option.
+// This function allows for specifying an additional option:
+// alignTimestamp.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ if options != nil {
+ if options.alignTimestamp != 0 {
+ args = append(args, options.alignTimestamp)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrBy - Increments the latest value of a time-series key by the given amount.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.INCRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrByWithArgs - Increments the latest value of a time-series key by the given amount, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.INCRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrBy - Decrements the latest value of a time-series key by the given amount.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.DECRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrByWithArgs - Decrements the latest value of a time-series key by the given amount, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.DECRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 {
+ args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff)
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDel - Deletes a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.del/
+func (c cmdable) TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd {
+ args := []interface{}{"TS.DEL", Key, fromTimestamp, toTimestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDeleteRule - Deletes a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.deleterule/
+func (c cmdable) TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd {
+ args := []interface{}{"TS.DELETERULE", sourceKey, destKey}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGetWithArgs - Gets the last sample of a time-series key with an additional option.
+// This function allows for specifying an additional option:
+// Latest.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ }
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGet - Gets the last sample of a time-series key.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGet(ctx context.Context, key string) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValue struct {
+ Timestamp int64
+ Value float64
+}
+type TSTimestampValueCmd struct {
+ baseCmd
+ val TSTimestampValue
+}
+
+func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd {
+ return &TSTimestampValueCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueCmd) SetVal(val TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueCmd) Result() (TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueCmd) Val() TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = TSTimestampValue{}
+ for i := 0; i < n; i++ {
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Timestamp = timestamp
+ cmd.val.Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSInfo - Returns information about a time-series key.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSInfoWithArgs - Returns information about a time-series key with an additional option.
+// This function allows for specifying an additional option:
+// Debug.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ if options != nil {
+ if options.Debug {
+ args = append(args, "DEBUG")
+ }
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMAdd - Adds multiple samples to multiple time-series keys.
+// It accepts a slice of 'ktv' slices, each of which must contain exactly three elements: key, timestamp, and value.
+// For more information - https://redis.io/commands/ts.madd/
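+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb connected to a server with the RedisTimeSeries module):
+//
+//	timestamps, err := rdb.TSMAdd(ctx, [][]interface{}{
+//		{"temp:room1", 1717000000000, 21.5},
+//		{"temp:room2", 1717000000000, 19.8},
+//	}).Result()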
+func (c cmdable) TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd {
+ args := []interface{}{"TS.MADD"}
+ for _, ktv := range ktvSlices {
+ args = append(args, ktv...)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSQueryIndex - Returns all the keys matching the filter expression.
+// For more information - https://redis.io/commands/ts.queryindex/
+func (c cmdable) TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd {
+ args := []interface{}{"TS.QUERYINDEX"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRange - Returns a range of samples from a time-series key in reverse order.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRangeWithArgs - Returns a range of samples from a time-series key in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRange - Returns a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRangeWithArgs - Returns a range of samples from a time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValueSliceCmd struct {
+ baseCmd
+ val []TSTimestampValue
+}
+
+func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd {
+ return &TSTimestampValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueSliceCmd) SetVal(val []TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueSliceCmd) Result() ([]TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueSliceCmd) Val() []TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]TSTimestampValue, n)
+ for i := 0; i < n; i++ {
+ _, _ = rd.ReadArrayLen()
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Timestamp = timestamp
+ cmd.val[i].Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSMRange - Returns a range of samples from multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRangeWithArgs - Returns a range of samples from multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRange - Returns a range of samples from multiple time-series keys in reverse order.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRangeWithArgs - Returns a range of samples from multiple time-series keys in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGet - Returns the last sample of multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET", "FILTER"}
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGetWithArgs - Returns the last sample of multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, WithLabels and SelectedLabels.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET"}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go
new file mode 100644
index 0000000..0daa222
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/tx.go
@@ -0,0 +1,150 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction fails.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+//
+// If you don't need WATCH, use Pipeline instead.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+}
+
+func (c *Client) newTx() *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool),
+ hooksMixin: c.hooksMixin.clone(),
+ },
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when fn exits.
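+//
+// Illustrative usage sketch (not part of the upstream go-redis source; assumes a
+// *redis.Client named rdb): an optimistic, check-and-set style increment.
+// Real code should retry when TxFailedErr is returned.
+//
+//	err := rdb.Watch(ctx, func(tx *redis.Tx) error {
+//		n, err := tx.Get(ctx, "counter").Int()
+//		if err != nil && err != redis.Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+//			pipe.Set(ctx, "counter", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "counter")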
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx()
+ defer tx.Close(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
+ return err
+ }
+ }
+ return fn(tx)
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.processPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdsCopy := make([]Cmder, len(cmds)+2)
+ cmdsCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdsCopy[1:], cmds)
+ cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdsCopy
+}
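
The WATCH/MULTI/EXEC flow that `Tx` wires together is easiest to see in the classic check-and-set loop. The sketch below is illustrative only, assuming a local Redis server and a hypothetical `counter` key; it retries the transaction whenever `TxFailedErr` signals that the watched key changed between GET and EXEC.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"strconv"

	"github.com/redis/go-redis/v9"
)

// increment performs an optimistic, transactional read-modify-write of key.
func increment(ctx context.Context, rdb *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		// GET runs on the watched connection; EXEC below only succeeds
		// if no other client touched the key in the meantime.
		n, err := tx.Get(ctx, key).Int()
		if err != nil && !errors.Is(err, redis.Nil) {
			return err
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
			return nil
		})
		return err
	}

	for i := 0; i < 10; i++ {
		err := rdb.Watch(ctx, txf, key)
		if err == nil {
			return nil
		}
		if errors.Is(err, redis.TxFailedErr) {
			continue // the watched key changed under us; retry
		}
		return err
	}
	return fmt.Errorf("increment %s: too many retries", key)
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	if err := increment(context.Background(), rdb, "counter"); err != nil {
		fmt.Println(err)
	}
}
```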
diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go
new file mode 100644
index 0000000..a1ce17b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/universal.go
@@ -0,0 +1,284 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ SentinelUsername string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // MasterName is the sentinel master name.
+ // Only for failover clients.
+ MasterName string
+
+ // DisableIndentity - Disable set-lib on connect.
+ //
+ // default: false
+ //
+ // Deprecated: Use DisableIdentity instead.
+ DisableIndentity bool
+
+ // DisableIdentity is used to disable CLIENT SETINFO command on connect.
+ //
+ // default: false
+ DisableIdentity bool
+
+ IdentitySuffix string
+ UnstableResp3 bool
+
+ // IsClusterMode can be used when only a single address is provided in Addrs (e.g. ElastiCache supports setting up cluster mode with a configuration endpoint).
+ IsClusterMode bool
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIdentity: o.DisableIdentity,
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+ ClientName: o.ClientName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelUsername: o.SentinelUsername,
+ SentinelPassword: o.SentinelPassword,
+
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ ReplicaOnly: o.ReadOnly,
+
+ DisableIdentity: o.DisableIdentity,
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIdentity: o.DisableIdentity,
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ UnstableResp3: o.UnstableResp3,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or having different
+// clients in different environments.
+type UniversalClient interface {
+ Cmdable
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ SSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified with RouteByLatency, RouteRandomly or IsClusterMode,
+// a FailoverClusterClient is returned.
+// 2. If the MasterName option is specified without RouteByLatency, RouteRandomly or IsClusterMode,
+// a sentinel-backed FailoverClient is returned.
+// 3. If the number of Addrs is two or more, or IsClusterMode option is specified,
+// a ClusterClient is returned.
+// 4. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts == nil {
+ panic("redis: NewUniversalClient nil options")
+ }
+
+ switch {
+ case opts.MasterName != "" && (opts.RouteByLatency || opts.RouteRandomly || opts.IsClusterMode):
+ return NewFailoverClusterClient(opts.Failover())
+ case opts.MasterName != "":
+ return NewFailoverClient(opts.Failover())
+ case len(opts.Addrs) > 1 || opts.IsClusterMode:
+ return NewClusterClient(opts.Cluster())
+ default:
+ return NewClient(opts.Simple())
+ }
+}
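
A small sketch of the selection rules documented above; the addresses, sentinel master name, and client variable names are placeholders, not values from this repository.

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()

	// Two or more seed addresses (or IsClusterMode) -> *redis.ClusterClient.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"node1:6379", "node2:6379", "node3:6379"},
	})

	// MasterName set (without the cluster routing options) -> sentinel-backed failover client.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:      []string{"sentinel1:26379", "sentinel2:26379"},
		MasterName: "mymaster",
	})

	// Single address, no MasterName -> plain single-node *redis.Client.
	single := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})

	for _, c := range []redis.UniversalClient{cluster, failover, single} {
		fmt.Printf("%T ping: %v\n", c, c.Ping(ctx).Err())
		_ = c.Close()
	}
}
```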
diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go
new file mode 100644
index 0000000..24a037f
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+ return "9.9.0"
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 0000000..c7b459e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 0000000..2c8f480
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,57 @@
+# Copyright 2013-2023 The Cobra Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ # - deadcode ! deprecated since v1.49.0; replaced by 'unused'
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ #- gochecknoinits
+ - goconst
+ - gocritic
+ #- gocyclo
+ - gofmt
+ - goimports
+ #- gomnd
+ #- goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ #- lll
+ - misspell
+ #- nakedret
+ #- noctx
+ - nolintlint
+ #- rowserrcheck
+ #- scopelint
+ - staticcheck
+ #- structcheck ! deprecated since v1.49.0; replaced by 'unused'
+ - stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ - unused
+ # - varcheck ! deprecated since v1.49.0; replaced by 'unused'
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 0000000..94ec530
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 0000000..9d16f88
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 0000000..6f356e6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask them in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+ similar one was already opened. If there is one already opened, feel free
+ to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+ reproduction, the version of Cobra and anything else you believe will be
+ useful to help troubleshoot it (e.g. OS environment, environment variables,
+ etc...). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+ a clear title and description of what the feature is and why it would be
+ beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+ sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+ for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, ensure the new code is properly formatted to
+ keep the codebase consistent. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 0000000..298f0e2
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS
new file mode 100644
index 0000000..4c5ac3d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/MAINTAINERS
@@ -0,0 +1,13 @@
+maintainers:
+- spf13
+- johnSchnake
+- jpmcb
+- marckhouzam
+inactive:
+- anthonyfok
+- bep
+- bogem
+- broady
+- eparis
+- jharshman
+- wfernandes
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 0000000..0da8d7a
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,35 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
+.PHONY: fmt lint test install_deps clean
+
+default: all
+
+all: fmt test
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps
+ $(info ******************** running tests ********************)
+ go test -v ./...
+
+richtest: install_deps
+ $(info ******************** running tests with kyoh86/richgo ********************)
+ richgo test -v ./...
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 0000000..7175715
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,113 @@
+
+
+
+Cobra is a library for creating powerful modern CLI applications.
+
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+
+[](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[](https://pkg.go.dev/github.com/spf13/cobra)
+[](https://goreportcard.com/report/github.com/spf13/cobra)
+[](https://gophers.slack.com/archives/CD3LP1199)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Grouping help for subcommands
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+ or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command, we are telling Git to clone the URL as a bare repository.
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have children commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to children commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library.
+
+```
+go get -u github.com/spf13/cobra@latest
+```
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Usage
+`cobra-cli` is a command line program to generate cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
+
+It can be installed by running:
+
+```
+go install github.com/spf13/cobra-cli@latest
+```
+
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
+
+For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt)
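
To make the `APPNAME VERB NOUN --ADJECTIVE` pattern concrete, here is a hedged, minimal Cobra program with one subcommand and one local flag; the command and flag names are illustrative and do not come from this repository.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var port int

	rootCmd := &cobra.Command{
		Use:   "app",
		Short: "app is a tiny demo CLI",
	}

	serverCmd := &cobra.Command{
		Use:   "server",
		Short: "Start the demo server",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Printf("serving on port %d\n", port)
			return nil
		},
	}
	// 'port' is a local flag: it only applies to `app server`.
	serverCmd.Flags().IntVar(&port, "port", 1313, "port to listen on")

	rootCmd.AddCommand(serverCmd)
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

Running `app server --port=8080` then invokes the subcommand's `RunE` with the flag value, exactly as in the `hugo server --port=1313` example above.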
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 0000000..b3e2dad
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,60 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "ACTIVE_HELP"
+ activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func activeHelpEnvVar(name string) string {
+ return configEnvVar(name, activeHelpEnvVarSuffix)
+}
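
For context, a program would normally emit ActiveHelp from its completion logic rather than call these helpers directly. The sketch below is hypothetical (command name, candidates, and message are invented) and shows an ActiveHelp line being appended from a `ValidArgsFunction`.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet [name]",
		Args: cobra.MaximumNArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("hello", args)
		},
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]cobra.Completion, cobra.ShellCompDirective) {
			comps := []cobra.Completion{"alice", "bob"}
			// Rendered by the completion script as guidance, not as a candidate,
			// unless the user disabled ActiveHelp via the env vars described above.
			comps = cobra.AppendActiveHelp(comps, "You may also type any custom name")
			return comps, cobra.ShellCompDirectiveNoFileComp
		},
	}
	_ = cmd.Execute()
}
```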
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 0000000..ed1e70c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,131 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// legacyArgs validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if there are any positional args that are not in
+// the `ValidArgs` field of `Command`
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description is following a tab character.
+ validArgs := make([]string, 0, len(cmd.ValidArgs))
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0])
+ }
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
+
+// MatchAll allows combining several PositionalArgs to work in concert.
+func MatchAll(pargs ...PositionalArgs) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ for _, parg := range pargs {
+ if err := parg(cmd, args); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+//
+// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead
+func ExactValidArgs(n int) PositionalArgs {
+ return MatchAll(ExactArgs(n), OnlyValidArgs)
+}
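
A brief example of composing these validators on a command; the command name and valid arguments are invented for illustration.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "deploy <environment>",
		ValidArgs: []string{"staging", "production"},
		// Exactly one argument, and it must be one of ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("deploying to", args[0])
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

With this wiring, `deploy staging` succeeds, while `deploy` or `deploy qa` fail validation before `Run` is ever invoked.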
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 0000000..f4d198c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,709 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows handling aliases
+ args=("${words[@]:1}")
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ # Use printf to strip any trailing newline
+ subdir=$(printf "%%s" "${out}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION:-}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+
+ if [[ -z "${flag_parsing_disabled}" ]]; then
+ # If flag parsing is enabled, we have completed the flags and can return.
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
+ # to possibly call handle_go_custom_completion.
+ return 0;
+ fi
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue=""
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+}
+
+func writePostscript(buf io.StringWriter, name string) {
+ name = strings.ReplaceAll(name, ":", "__")
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
+ local cur prev words cword split
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flag_parsing_disabled=
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local command_aliases=()
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function=""
+ local last_command=""
+ local nouns=()
+ local noun_aliases=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
+ if len(flag.NoOptDefVal) == 0 {
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ }
+}
+
+// prepareCustomAnnotationsForFlags sets up annotations for Go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of ___go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf io.StringWriter, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ WriteStringAndCheck(buf, ` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+
+ if cmd.DisableFlagParsing {
+ WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
+ }
+
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ // localNonPersistentFlags are used to stop the completion of subcommands when one is set
+ // if TraverseChildren is true we should allow completing subcommands
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok {
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.SplitN(value, "\t", 2)[0]
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Strings(cmd.Aliases)
+
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
+}
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
+ for _, value := range cmd.ArgAliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf io.StringWriter, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
+
+ if cmd.Root() == cmd {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ WriteStringAndCheck(buf, "}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
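+
+// Illustrative usage (editor's sketch, not part of the upstream source): an
+// application would typically call GenBashCompletion on its root command and
+// write the script to stdout or a file; "appCmd" is a hypothetical root command.
+//
+//	if err := appCmd.GenBashCompletion(os.Stdout); err != nil {
+//		log.Fatal(err)
+//	}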
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 0000000..d2397aa
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,484 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genBashComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+ local requestComp lastParam lastChar args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of %[1]s directly allows handling aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [[ -z ${cur} && ${lastChar} != = ]]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} ''"
+ fi
+
+ # When completing a flag with an = (e.g., %[1]s -n=)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ ${cur} == -*=* ]]; then
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [[ ${directive} == "${out}" ]]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out}"
+}
+
+__%[1]s_process_completion_results() {
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ if (((directive & shellCompDirectiveError) != 0)); then
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ else
+ if (((directive & shellCompDirectiveNoSpace) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no space"
+ compopt -o nospace
+ else
+ __%[1]s_debug "No space directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ # nosort isn't supported for bash versions older than 4.4
+ if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ else
+ __%[1]s_debug "Activating keep order"
+ compopt -o nosort
+ fi
+ else
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no file completion"
+ compopt +o default
+ else
+ __%[1]s_debug "No file completion directive not supported in this version of bash"
+ fi
+ fi
+ fi
+
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
+ if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
+ # File extension filtering
+ local fullFilter="" filter filteringCmd
+
+ # Do not use quotes around the $completions variable or else newline
+ # characters will be kept.
+ for filter in ${completions[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
+ # File completion for directories only
+
+ local subdir
+ subdir=${completions[0]}
+ if [[ -n $subdir ]]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ __%[1]s_handle_completion_types
+ fi
+
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ __%[1]s_handle_activeHelp
+}
+
+__%[1]s_handle_activeHelp() {
+ # Print the activeHelp statements
+ if ((${#activeHelp[*]} != 0)); then
+ if [ -z $COMP_TYPE ]; then
+ # Bash v3 does not set the COMP_TYPE variable.
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+ __%[1]s_reprint_commandLine
+ return
+ fi
+
+ # Only print ActiveHelp on the second TAB press
+ if [ $COMP_TYPE -eq 63 ]; then
+ printf "\n"
+ printf "%%s\n" "${activeHelp[@]}"
+
+ if ((${#COMPREPLY[*]} == 0)); then
+ # When there are no completion choices from the program, file completion
+ # may kick in if the program has not disabled it; in such a case, we want
+ # to know if any files will match what the user typed, so that we know if
+ # there will be completions presented, so that we know how to handle ActiveHelp.
+ # To find out, we actually trigger the file completion ourselves;
+ # the call to _filedir will fill COMPREPLY if files match.
+ if (((directive & shellCompDirectiveNoFileComp) == 0)); then
+ __%[1]s_debug "Listing files"
+ _filedir
+ fi
+ fi
+
+ if ((${#COMPREPLY[*]} != 0)); then
+ # If there are completion choices to be shown, print a delimiter.
+ # Re-printing the command-line will automatically be done
+ # by the shell when it prints the completion choices.
+ printf -- "--"
+ else
+ # When there are no completion choices at all, we need
+ # to re-print the command-line since the shell will
+ # not be doing it itself.
+ __%[1]s_reprint_commandLine
+ fi
+ elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then
+ # For completion type: menu-complete/menu-complete-backward and insert-completions
+ # the completions are immediately inserted into the command-line, so we first
+ # print the activeHelp message and reprint the command-line since the shell won't.
+ printf "\n"
+ printf "%%s\n" "${activeHelp[@]}"
+
+ __%[1]s_reprint_commandLine
+ fi
+ fi
+}
+
+__%[1]s_reprint_commandLine() {
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
+
+ if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [[ -n $comp ]]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done <<<"${out}"
+}
+
+__%[1]s_handle_completion_types() {
+ __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
+
+ case $COMP_TYPE in
+ 37|42)
+ # Type: menu-complete/menu-complete-backward and insert-completions
+ # If the user requested inserting one completion at a time, or all
+ # completions at once on the command-line we must remove the descriptions.
+ # https://github.com/spf13/cobra/issues/1508
+
+ # If there are no completions, we don't need to do anything
+ (( ${#completions[@]} == 0 )) && return 0
+
+ local tab=$'\t'
+
+ # Strip any description and escape the completion to handle special characters
+ IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}")
+
+ # Only consider the completions that match
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}")
+
+ # compgen loses the escaping so we need to escape all completions again since they will
+ # all be inserted on the command-line.
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}")
+ ;;
+
+ *)
+ # Type: complete (normal completion)
+ __%[1]s_handle_standard_completion_case
+ ;;
+ esac
+}
+
+__%[1]s_handle_standard_completion_case() {
+ local tab=$'\t'
+
+ # If there are no completions, we don't need to do anything
+ (( ${#completions[@]} == 0 )) && return 0
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ # First, escape the completions to handle special characters
+ IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}")
+ # Only consider the completions that match what the user typed
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}")
+
+ # compgen loses the escaping so, if there is only a single completion, we need to
+ # escape it again because it will be inserted on the command-line. If there are multiple
+ # completions, we don't want to escape them because they will be printed in a list
+ # and we don't want to show escape characters in that list.
+ if (( ${#COMPREPLY[@]} == 1 )); then
+ COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}")
+ fi
+ return 0
+ fi
+
+ local longest=0
+ local compline
+ # Look for the longest completion so that we can format things nicely
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
+
+ # Before checking if the completion matches what the user typed,
+ # we need to strip any description and escape the completion to handle special
+ # characters because those escape characters are part of what the user typed.
+ # Don't call "printf" in a sub-shell because it will be much slower
+ # since we are in a loop.
+ printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}")
+
+ # Only consider the completions that match
+ [[ $comp == "$cur"* ]] || continue
+
+ # The completion matches. Add it to the list of full completions including
+ # its description. We don't escape the completion because it may get printed
+ # in a list if there are more than one and we don't want to show escape characters
+ # in that list.
+ COMPREPLY+=("$compline")
+
+ # Strip any description before checking the length, and again, don't escape
+ # the completion because this length is only used when printing the completions
+ # in a list and we don't want to show escape characters in that list.
+ comp=${compline%%%%$tab*}
+ if ((${#comp}>longest)); then
+ longest=${#comp}
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+
+ # If there is a single completion left, remove the description text and escape any special characters
+ if ((${#COMPREPLY[*]} == 1)); then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}")
+ __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}"
+ else
+ # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
+ fi
+}
+
+__%[1]s_handle_special_char()
+{
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while ((--idx >= 0)); do
+ COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
+ done
+ fi
+}
+
+__%[1]s_format_comp_descriptions()
+{
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if ((maxdesclength > 8)); then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
+
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if ((maxdesclength > 0)); then
+ if ((${#desc} > maxdesclength)); then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
+ fi
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
+ fi
+ done
+}
+
+__start_%[1]s()
+{
+ local cur prev words cword split
+
+ COMPREPLY=()
+
+ # Call _init_completion from the bash-completion package
+ # to prepare the arguments properly
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -n =: || return
+ else
+ __%[1]s_init_completion -n =: || return
+ fi
+
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ local out directive
+ __%[1]s_get_completion_results
+ __%[1]s_process_completion_results
+}
+
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
+else
+ complete -o default -o nospace -F __start_%[1]s %[1]s
+fi
+
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
+
+// GenBashCompletionFileV2 generates Bash completion version 2.
+func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletionV2(outFile, includeDesc)
+}
+
+// GenBashCompletionV2 generates Bash completion file version 2
+// and writes it to the passed writer.
+func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
+ return c.genBashCompletion(w, includeDesc)
+}
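+
+// Illustrative usage (editor's sketch, not part of the upstream source): the V2
+// generator can include completion descriptions; "appCmd" is a hypothetical root
+// command and the boolean controls whether descriptions are emitted.
+//
+//	if err := appCmd.GenBashCompletionV2(os.Stdout, true); err != nil {
+//		log.Fatal(err)
+//	}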
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..d9cd241
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,246 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+var finalizers []func()
+
+const (
+ defaultPrefixMatching = false
+ defaultCommandSorting = true
+ defaultCaseInsensitive = false
+ defaultTraverseRunHooks = false
+)
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = defaultPrefixMatching
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = defaultCommandSorting
+
+// EnableCaseInsensitive allows case-insensitive command names (case-sensitive by default).
+var EnableCaseInsensitive = defaultCaseInsensitive
+
+// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents.
+// By default this is disabled, which means only the first run hook to be found is executed.
+var EnableTraverseRunHooks = defaultTraverseRunHooks
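+
+// For example (editor's note, not upstream code), an application that wants every
+// parent's persistent pre-run hook to fire would set, early in main():
+//
+//	cobra.EnableTraverseRunHooks = true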
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
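+
+// Illustrative usage (editor's sketch): an application can register a helper and
+// reference it from a custom usage template; the name "upper" and the template
+// string are arbitrary examples.
+//
+//	cobra.AddTemplateFunc("upper", strings.ToUpper)
+//	rootCmd.SetUsageTemplate("Usage: {{.UseLine | upper}}\n")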
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// OnFinalize sets the passed functions to be run when each command's
+// Execute method is terminated.
+func OnFinalize(y ...func()) {
+ finalizers = append(finalizers, y...)
+}
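+
+// Illustrative usage (editor's sketch): initializers are typically used to load
+// configuration before any command runs; initConfig is a hypothetical function
+// defined by the application.
+//
+//	cobra.OnInitialize(initConfig)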
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ formattedString := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(formattedString, s)
+}
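+
+// For example (editor's note), rpad("add", 10) returns "add" followed by seven
+// spaces, i.e. a left-aligned 10-character field.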
+
+func tmpl(text string) *tmplFunc {
+ return &tmplFunc{
+ tmpl: text,
+ fn: func(w io.Writer, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+ },
+ }
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
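+
+// For example (editor's note), ld("kitten", "sitting", false) returns 3:
+// substitute 'k'->'s', substitute 'e'->'i', and append 'g'.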
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 0000000..dbb2c29
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,2067 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
+ CommandDisplayNameAnnotation = "cobra_annotation_command_display_name"
+
+ helpFlagName = "help"
+ helpCommandName = "help"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Group Structure to manage groups for commands
+type Group struct {
+ ID string
+ Title string
+}
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ // Recommended syntax is as follows:
+ // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ // ... indicates that you can specify multiple values for the previous argument.
+ // | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ // argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ // optional, they are enclosed in brackets ([ ]).
+ // Example: add [-F file | -D dir]... [-f format] profile
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // The group id under which this subcommand is grouped in the 'help' output of its parent.
+ GroupID string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in shell completions
+ ValidArgs []Completion
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction CompletionFunc
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the shell completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction holds custom bash functions used by the legacy bash autocompletion generator.
+ // For portability with other shells, it is recommended to instead use ValidArgsFunction
+ BashCompletionFunction string
+
+ // Deprecated defines, if this command is deprecated and should print this string when used.
+ Deprecated string
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands or set special options.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ // The *PreRun and *PostRun functions will only be executed if the Run function of the current
+ // command has been declared.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // groups for subcommands
+ commandgroups []*Group
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and children commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate *tmplFunc
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate *tmplFunc
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // helpCommandGroupID is the group id for the helpCommand
+ helpCommandGroupID string
+
+ // completionCommandGroupID is the group id for the completion command
+ completionCommandGroupID string
+
+ // versionTemplate is the version template defined by user.
+ versionTemplate *tmplFunc
+
+ // errPrefix is the error message prefix defined by user.
+ errPrefix string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+
+ // FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // CompletionOptions is a set of options to control the handling of shell completion
+ CompletionOptions CompletionOptions
+
+ // commandsAreSorted defines whether the commands slice is sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
+ // will be printed by generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+}
+
+// Context returns underlying command context. If command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetContext sets context for the command. This context will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC.
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default; if desired, it can be
+// overridden, which is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
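+
+// Illustrative usage (editor's sketch), mainly for tests: instead of relying on
+// os.Args, arguments can be injected before calling Execute. The "serve" and
+// "--port" values are hypothetical.
+//
+//	rootCmd.SetArgs([]string{"serve", "--port", "8080"})
+//	err := rootCmd.Execute()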
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+//
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ if s == "" {
+ c.usageTemplate = nil
+ return
+ }
+ c.usageTemplate = tmpl(s)
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpCommandGroupID sets the group id of the help command.
+func (c *Command) SetHelpCommandGroupID(groupID string) {
+ if c.helpCommand != nil {
+ c.helpCommand.GroupID = groupID
+ }
+ // helpCommandGroupID is used if no helpCommand is defined by the user
+ c.helpCommandGroupID = groupID
+}
+
+// SetCompletionCommandGroupID sets the group id of the completion command.
+func (c *Command) SetCompletionCommandGroupID(groupID string) {
+ // completionCommandGroupID is used if no completion command is defined by the user
+ c.Root().completionCommandGroupID = groupID
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ if s == "" {
+ c.helpTemplate = nil
+ return
+ }
+ c.helpTemplate = tmpl(s)
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ if s == "" {
+ c.versionTemplate = nil
+ return
+ }
+ c.versionTemplate = tmpl(s)
+}
+
+// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
+func (c *Command) SetErrPrefix(s string) {
+ c.errPrefix = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ fn := c.getUsageTemplateFunc()
+ err := fn(c.OutOrStderr(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ return err
+ }
+}
+
+// getUsageTemplateFunc returns the usage template function for the command
+// going up the command tree if necessary.
+func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error {
+ if c.usageTemplate != nil {
+ return c.usageTemplate.fn
+ }
+
+ if c.HasParent() {
+ return c.parent.getUsageTemplateFunc()
+ }
+ return defaultUsageFunc
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ fn := c.getHelpTemplateFunc()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := fn(c.OutOrStdout(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ }
+}
+
+// getHelpTemplateFunc returns the help template function for the command
+// going up the command tree if necessary.
+func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error {
+ if c.helpTemplate != nil {
+ return c.helpTemplate.fn
+ }
+
+ if c.HasParent() {
+ return c.parent.getHelpTemplateFunc()
+ }
+
+ return defaultHelpFunc
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ CheckErr(c.Usage())
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+// This function is kept for backwards-compatibility reasons.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != nil {
+ return c.usageTemplate.tmpl
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return defaultUsageTemplate
+}
+
+// HelpTemplate returns help template for the command.
+// This function is kept for backwards-compatibility reasons.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != nil {
+ return c.helpTemplate.tmpl
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return defaultHelpTemplate
+}
+
+// VersionTemplate returns version template for the command.
+// This function is kept for backwards-compatibility reasons.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != nil {
+ return c.versionTemplate.tmpl
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return defaultVersionTemplate
+}
+
+// getVersionTemplateFunc returns the version template function for the command
+// going up the command tree if necessary.
+func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error {
+ if c.versionTemplate != nil {
+ return c.versionTemplate.fn
+ }
+
+ if c.HasParent() {
+ return c.parent.getVersionTemplateFunc()
+ }
+ return defaultVersionFunc
+}
+
+// ErrPrefix returns error message prefix for the command
+func (c *Command) ErrPrefix() string {
+ if c.errPrefix != "" {
+ return c.errPrefix
+ }
+
+ if c.HasParent() {
+ return c.parent.ErrPrefix()
+ }
+ return "Error:"
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
+// Special care needs to be taken not to remove a flag value.
+func (c *Command) argsMinusFirstX(args []string, x string) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+ flags := c.Flags()
+
+Loop:
+ for pos := 0; pos < len(args); pos++ {
+ s := args[pos]
+ switch {
+ case s == "--":
+ // -- means we have reached the end of the parseable args. Break out of the loop now.
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ fallthrough
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip
+ // over the next arg, because that is the value of this flag.
+ pos++
+ continue
+ case !strings.HasPrefix(s, "-"):
+ // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so,
+ // return the args, excluding the one at this position.
+ if s == x {
+ ret := make([]string, 0, len(args)-1)
+ ret = append(ret, args[:pos]...)
+ ret = append(ret, args[pos+1:]...)
+ return ret
+ }
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[0:2] == "--") ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ var sb strings.Builder
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ sb.WriteString("\n\nDid you mean this?\n")
+ for _, s := range suggestions {
+ _, _ = fmt.Fprintf(&sb, "\t%v\n", s)
+ }
+ }
+ return sb.String()
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ // Temporarily disable gosec G602, which produces a false positive.
+ // See https://github.com/securego/gosec/issues/1005.
+ return matches[0] // #nosec G602
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds the root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is requested, regardless of other flags, return that we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool(helpFlagName)
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ fn := c.getVersionTemplateFunc()
+ err := fn(c.OutOrStdout(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ defer c.postRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ parents := make([]*Command, 0, 5)
+ for p := c; p != nil; p = p.Parent() {
+ if EnableTraverseRunHooks {
+ // When EnableTraverseRunHooks is set:
+ // - Execute all persistent pre-runs from the root parent till this command.
+ // - Execute all persistent post-runs from this command till the root parent.
+ parents = append([]*Command{p}, parents...)
+ } else {
+ // Otherwise, execute only the first found persistent hook.
+ parents = append(parents, p)
+ }
+ }
+ for _, p := range parents {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.ValidateRequiredFlags(); err != nil {
+ return err
+ }
+ if err := c.ValidateFlagGroups(); err != nil {
+ return err
+ }
+
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+
+ return nil
+}
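+// Illustrative note, not part of the upstream Cobra source: for a hypothetical
+// root command with one child, execute() above runs the hooks in this order:
+//
+//   PersistentPreRun(E)  - nearest defined hook only; with EnableTraverseRunHooks,
+//                          every hook from the root down to the executed command
+//   PreRun(E)            - of the executed command
+//   Run / RunE           - of the executed command
+//   PostRun(E)           - of the executed command
+//   PersistentPostRun(E) - nearest defined hook only; with EnableTraverseRunHooks,
+//                          every hook from the executed command up to the root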
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+func (c *Command) postRun() {
+ for _, x := range finalizers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
+
+// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) {
+ c.ctx = ctx
+ return c.ExecuteC()
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help at the last point to allow for user overriding
+ c.InitDefaultHelpCmd()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the __complete command to be used for shell completion
+ c.initCompleteCmd(args)
+
+ // initialize the default completion command
+ c.InitDefaultCompletionCmd(args...)
+
+ // Now that all commands have been created, let's make sure all groups
+ // are properly created also
+ c.checkCommandGroups()
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If we parsed down to a subcommand and then failed, report the error against that subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.PrintErrln(c.ErrPrefix(), err.Error())
+ c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if errors.Is(err, flag.ErrHelp) {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.PrintErrln(cmd.ErrPrefix(), err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
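+// Illustrative sketch, not part of the upstream Cobra source: minimal wiring
+// of Execute for a hypothetical root command.
+//
+//   root := &Command{
+//       Use:          "app",
+//       Short:        "example application",
+//       SilenceUsage: true, // do not dump usage on runtime errors; --help still works
+//       RunE: func(cmd *Command, args []string) error {
+//           cmd.Println("hello from app")
+//           return nil
+//       },
+//   }
+//   if err := root.Execute(); err != nil {
+//       os.Exit(1)
+//   }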
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return ArbitraryArgs(c, args)
+ }
+ return c.Args(c, args)
+}
+
+// ValidateRequiredFlags validates that all required flags are present and returns an error otherwise
+func (c *Command) ValidateRequiredFlags() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
+
+// checkCommandGroups checks if a command has been added to a group that does not exist.
+// If so, we panic because it indicates a coding error that should be corrected.
+func (c *Command) checkCommandGroups() {
+ for _, sub := range c.commands {
+ // if Group is not defined let the developer know right away
+ if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+ panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+ }
+
+ sub.checkCommandGroups()
+ }
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup(helpFlagName) == nil {
+ usage := "help for "
+ name := c.DisplayName()
+ if name == "" {
+ usage += "this command"
+ } else {
+ usage += name
+ }
+ c.Flags().BoolP(helpFlagName, "h", false, usage)
+ _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically when executing c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.DisplayName()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
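+// Illustrative sketch, not part of the upstream Cobra source: setting the
+// Version field is enough to get the automatic --version flag (and the -v
+// shorthand when it is still free); the version string below is hypothetical.
+//
+//   root := &Command{Use: "app", Version: "1.2.3", Run: func(cmd *Command, args []string) {}}
+//   // "app --version" prints "app version 1.2.3" via the default version template.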
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
+ ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
+ var completions []Completion
+ cmd, _, e := c.Root().Find(args)
+ if e != nil {
+ return nil, ShellCompDirectiveNoFileComp
+ }
+ if cmd == nil {
+ // Root help command.
+ cmd = c.Root()
+ }
+ for _, subCmd := range cmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short))
+ }
+ }
+ }
+ return completions, ShellCompDirectiveNoFileComp
+ },
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ CheckErr(c.Root().Usage())
+ } else {
+ cmd.InitDefaultHelpFlag() // make it possible for the 'help' flag to be shown
+ cmd.InitDefaultVersionFlag() // make it possible for the 'version' flag to be shown
+ CheckErr(cmd.Help())
+ }
+ },
+ GroupID: c.helpCommandGroupID,
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
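+// Illustrative sketch, not part of the upstream Cobra source: building a small
+// command tree with AddCommand; the command names are hypothetical.
+//
+//   root := &Command{Use: "app"}
+//   serve := &Command{Use: "serve", Short: "start the server", Run: func(cmd *Command, args []string) {}}
+//   migrate := &Command{Use: "migrate", Short: "run migrations", Run: func(cmd *Command, args []string) {}}
+//   root.AddCommand(serve, migrate)
+//   // Commands() now returns the children (sorted by name while
+//   // EnableCommandSorting is enabled) and each child's Parent() is root.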
+
+// Groups returns a slice of child command groups.
+func (c *Command) Groups() []*Group {
+ return c.commandgroups
+}
+
+// AllChildCommandsHaveGroup returns whether all subcommands are assigned to a group
+func (c *Command) AllChildCommandsHaveGroup() bool {
+ for _, sub := range c.commands {
+ if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsGroup returns whether groupID exists in the list of command groups.
+func (c *Command) ContainsGroup(groupID string) bool {
+ for _, x := range c.commandgroups {
+ if x.ID == groupID {
+ return true
+ }
+ }
+ return false
+}
+
+// AddGroup adds one or more command groups to this parent command.
+func (c *Command) AddGroup(groups ...*Group) {
+ c.commandgroups = append(c.commandgroups, groups...)
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.DisplayName()
+}
+
+// DisplayName returns the name to display in help text. It returns the command's
+// Name() if the CommandDisplayNameAnnotation annotation is not set.
+func (c *Command) DisplayName() string {
+ if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok {
+ return displayName
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1)
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + use
+ } else {
+ useline = use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if commandNameMatches(a, s) {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases start
+// with prefix
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a list of the command name and all aliases
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag
+ if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for a matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new one.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
+
+// commandNameMatches checks if two command names are equal
+// taking into account case sensitivity according to
+// EnableCaseInsensitive global configuration.
+func commandNameMatches(s string, t string) bool {
+ if EnableCaseInsensitive {
+ return strings.EqualFold(s, t)
+ }
+
+ return s == t
+}
+
+// tmplFunc holds a template and a function that will execute said template.
+type tmplFunc struct {
+ tmpl string
+ fn func(io.Writer, interface{}) error
+}
+
+var defaultUsageTemplate = `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}}
+
+Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}}
+
+{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}}
+
+Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+
+// defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync.
+func defaultUsageFunc(w io.Writer, in interface{}) error {
+ c := in.(*Command)
+ fmt.Fprint(w, "Usage:")
+ if c.Runnable() {
+ fmt.Fprintf(w, "\n %s", c.UseLine())
+ }
+ if c.HasAvailableSubCommands() {
+ fmt.Fprintf(w, "\n %s [command]", c.CommandPath())
+ }
+ if len(c.Aliases) > 0 {
+ fmt.Fprintf(w, "\n\nAliases:\n")
+ fmt.Fprintf(w, " %s", c.NameAndAliases())
+ }
+ if c.HasExample() {
+ fmt.Fprintf(w, "\n\nExamples:\n")
+ fmt.Fprintf(w, "%s", c.Example)
+ }
+ if c.HasAvailableSubCommands() {
+ cmds := c.Commands()
+ if len(c.Groups()) == 0 {
+ fmt.Fprintf(w, "\n\nAvailable Commands:")
+ for _, subcmd := range cmds {
+ if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName {
+ fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+ }
+ }
+ } else {
+ for _, group := range c.Groups() {
+ fmt.Fprintf(w, "\n\n%s", group.Title)
+ for _, subcmd := range cmds {
+ if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) {
+ fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+ }
+ }
+ }
+ if !c.AllChildCommandsHaveGroup() {
+ fmt.Fprintf(w, "\n\nAdditional Commands:")
+ for _, subcmd := range cmds {
+ if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) {
+ fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+ }
+ }
+ }
+ }
+ }
+ if c.HasAvailableLocalFlags() {
+ fmt.Fprintf(w, "\n\nFlags:\n")
+ fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages()))
+ }
+ if c.HasAvailableInheritedFlags() {
+ fmt.Fprintf(w, "\n\nGlobal Flags:\n")
+ fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages()))
+ }
+ if c.HasHelpSubCommands() {
+ fmt.Fprintf(w, "\n\nAdditional help topics:")
+ for _, subcmd := range c.Commands() {
+ if subcmd.IsAdditionalHelpTopicCommand() {
+ fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short)
+ }
+ }
+ }
+ if c.HasAvailableSubCommands() {
+ fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath())
+ }
+ fmt.Fprintln(w)
+ return nil
+}
+
+var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+
+// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync.
+func defaultHelpFunc(w io.Writer, in interface{}) error {
+ c := in.(*Command)
+ usage := c.Long
+ if usage == "" {
+ usage = c.Short
+ }
+ usage = trimRightSpace(usage)
+ if usage != "" {
+ fmt.Fprintln(w, usage)
+ fmt.Fprintln(w)
+ }
+ if c.Runnable() || c.HasSubCommands() {
+ fmt.Fprint(w, c.UsageString())
+ }
+ return nil
+}
+
+var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+
+// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
+func defaultVersionFunc(w io.Writer, in interface{}) error {
+ c := in.(*Command)
+ _, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version)
+ return err
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 0000000..307f0c1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,20 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 0000000..adbef39
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,41 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
new file mode 100644
index 0000000..a1752f7
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -0,0 +1,1005 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it.
+var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{}
+
+// lock for reading and writing from flagCompletionFunctions
+var flagCompletionMutex = &sync.RWMutex{}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+type flagCompError struct {
+ subCommand string
+ flagName string
+}
+
+func (e *flagCompError) Error() string {
+ return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
+}
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveFilterFileExt indicates that the provided completions
+ // should be used as file extension filters.
+ // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
+ // is a shortcut to using this directive explicitly. The BashCompFilenameExt
+ // annotation can also be used to obtain the same behavior for flags.
+ ShellCompDirectiveFilterFileExt
+
+ // ShellCompDirectiveFilterDirs indicates that only directory names should
+ // be provided in file completion. To request directory names within another
+ // directory, the returned completions should specify the directory within
+ // which to search. The BashCompSubdirsInDir annotation can be used to
+ // obtain the same behavior but only for flags.
+ ShellCompDirectiveFilterDirs
+
+ // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+ // in which the completions are provided
+ ShellCompDirectiveKeepOrder
+
+ // ===========================================================================
+
+ // All directives using iota should be above this one.
+ // For internal use.
+ shellCompDirectiveMaxValue
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ // This one must be last to avoid messing up the iota count.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+const (
+ // Constants for the completion command
+ compCmdName = "completion"
+ compCmdNoDescFlagName = "no-descriptions"
+ compCmdNoDescFlagDesc = "disable completion descriptions"
+ compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+ // DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ DisableDefaultCmd bool
+ // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ // for shells that support completion descriptions
+ DisableNoDescFlag bool
+ // DisableDescriptions turns off all completion descriptions for shells
+ // that support them
+ DisableDescriptions bool
+ // HiddenDefaultCmd makes the default 'completion' command hidden
+ HiddenDefaultCmd bool
+}
+
+// Completion is a string that can be used for completions
+//
+// two formats are supported:
+// - the completion choice
+// - the completion choice with a textual description (separated by a TAB).
+//
+// [CompletionWithDesc] can be used to create a completion string with a textual description.
+//
+// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used.
+type Completion = string
+
+// CompletionFunc is a function that provides completion results.
+type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective)
+
+// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format.
+func CompletionWithDesc(choice string, description string) Completion {
+ return choice + "\t" + description
+}
+
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+//
+// This method satisfies [CompletionFunc].
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
+ return nil, ShellCompDirectiveNoFileComp
+}
+
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+//
+// This method returns a function that satisfies [CompletionFunc]
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc {
+ return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
+ return choices, directive
+ }
+}
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+//
+// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions],
+// or you can define your own.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ flagCompletionMutex.Lock()
+ defer flagCompletionMutex.Unlock()
+
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
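+// Illustrative sketch, not part of the upstream Cobra source: registering a
+// static completion list for a flag by combining RegisterFlagCompletionFunc,
+// FixedCompletions and CompletionWithDesc. The flag name and choices are
+// hypothetical.
+//
+//   cmd.Flags().String("output", "table", "output format")
+//   _ = cmd.RegisterFlagCompletionFunc("output", FixedCompletions(
+//       []Completion{
+//           CompletionWithDesc("table", "human readable table"),
+//           CompletionWithDesc("json", "machine readable JSON"),
+//       },
+//       ShellCompDirectiveNoFileComp,
+//   ))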
+
+// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return nil, false
+ }
+
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+
+ completionFunc, exists := flagCompletionFunctions[flag]
+ return completionFunc, exists
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if d&ShellCompDirectiveFilterFileExt != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterFileExt")
+ }
+ if d&ShellCompDirectiveFilterDirs != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterDirs")
+ }
+ if d&ShellCompDirectiveKeepOrder != 0 {
+ directives = append(directives, "ShellCompDirectiveKeepOrder")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d >= shellCompDirectiveMaxValue {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd
+ if !noDescriptions {
+ if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil {
+ noDescriptions = !doDescriptions
+ }
+ }
+ noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable
+ out := finalCmd.OutOrStdout()
+ for _, comp := range completions {
+ if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) {
+ // Remove all activeHelp entries if it's disabled.
+ continue
+ }
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.SplitN(comp, "\t", 2)[0]
+ }
+
+ // Make sure we only write the first line to the output.
+ // This is needed if a description contains a linebreak.
+ // Otherwise the shell scripts will interpret the other lines as new flags
+ // and could therefore provide a wrong completion.
+ comp = strings.SplitN(comp, "\n", 2)[0]
+
+ // Finally trim the completion. This is especially important to get rid
+ // of a trailing tab when there is no description following it.
+ // For example, a sub-command without a description should not be completed
+ // with a tab at the end (or else zsh will show a -- following it
+ // although there is no description).
+ comp = strings.TrimSpace(comp)
+
+ // Print each possible completion to the output for the completion script to consume.
+ fmt.Fprintln(out, comp)
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be that last character following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(out, ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect
+// flags that accept multiple values and therefore can provide completion
+// multiple times.
+type SliceValue interface {
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) {
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ var finalCmd *Command
+ var finalArgs []string
+ var err error
+ // Find the real command for which completion must be performed
+ // check if we need to traverse here to parse local flags on parent commands
+ if c.Root().TraverseChildren {
+ finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
+ } else {
+ // For Root commands that don't specify any value for their Args fields, when we call
+ // Find(), if those Root commands don't have any sub-commands, they will accept arguments.
+ // However, because we have added the __complete sub-command in the current code path, the
+ // call to Find() -> legacyArgs() will return an error if there are any arguments.
+ // To avoid this, we first remove the __complete command to get back to having no sub-commands.
+ rootCmd := c.Root()
+ if len(rootCmd.Commands()) == 1 {
+ rootCmd.RemoveCommand(c)
+ }
+
+ finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
+ }
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs)
+ }
+ finalCmd.ctx = c.ctx
+
+ // These flags are normally added when `execute()` is called on `finalCmd`,
+ // however, when doing completion, we don't call `finalCmd.execute()`.
+ // Let's add the --help and --version flag ourselves but only if the finalCmd
+ // has not disabled flag parsing; if flag parsing is disabled, it is up to the
+ // finalCmd itself to handle the completion of *all* flags.
+ if !finalCmd.DisableFlagParsing {
+ finalCmd.InitDefaultHelpFlag()
+ finalCmd.InitDefaultVersionFlag()
+ }
+
+ // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also
+ // remove the flag name argument from the list of finalArgs or else the parsing
+ // could fail due to an invalid value (incomplete) for the flag.
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+
+ // Check if interspersed is false or -- was set on a previous arg.
+ // This works by counting the arguments. Normally -- is not counted as arg but
+ // if -- was already set or interspersed is false and there is already one arg then
+ // the extra added -- is counted as arg.
+ flagCompletion := true
+ _ = finalCmd.ParseFlags(append(finalArgs, "--"))
+ newArgCount := finalCmd.Flags().NArg()
+
+ // Parse the flags early so we can check if required flags are set
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ realArgCount := finalCmd.Flags().NArg()
+ if newArgCount > realArgCount {
+ // don't do flag completion (see above)
+ flagCompletion = false
+ }
+ // Error while attempting to parse flags
+ if flagErr != nil {
+ // If error type is flagCompError and we don't want flagCompletion we should ignore the error
+ if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr
+ }
+ }
+
+ // Look for the --help or --version flags. If they are present,
+ // there should be no further completions.
+ if helpOrVersionFlagPresent(finalCmd) {
+ return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ if flag != nil && flagCompletion {
+ // Check if we are completing a flag value subject to annotations
+ if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
+ if len(validExts) != 0 {
+ // File completion filtered by extensions
+ return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
+ }
+
+ // The annotation requests simple file completion. There is no reason to do
+ // that since it is the default behavior anyway. Let's ignore this annotation
+ // in case the program also registered a completion function for this flag.
+ // Even though it is a mistake on the program's side, let's be nice when we can.
+ }
+
+ if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
+ if len(subDir) == 1 {
+ // Directory completion from within a directory
+ return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
+ }
+ // Directory completion
+ return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil
+ }
+ }
+
+ var completions []Completion
+ var directive ShellCompDirective
+
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
+ // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
+ //
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag name to be complete
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
+ // First check for required flags
+ completions = completeRequireFlags(finalCmd, toComplete)
+
+ // If we have not found any required flags, only then can we show regular flags
+ if len(completions) == 0 {
+ doCompleteFlags := func(flag *pflag.Flag) {
+ _, acceptsMultiple := flag.Value.(SliceValue)
+ acceptsMultiple = acceptsMultiple ||
+ strings.Contains(flag.Value.Type(), "Slice") ||
+ strings.Contains(flag.Value.Type(), "Array") ||
+ strings.HasPrefix(flag.Value.Type(), "stringTo")
+
+ if !flag.Changed || acceptsMultiple {
+ // If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo)
+ // we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ // Try to complete non-inherited flags even if DisableFlagParsing==true.
+ // This allows programs to tell Cobra about flags for completion even
+ // if the actual parsing of flags is not done by Cobra.
+ // For instance, Helm uses this to provide flag name completion for
+ // some of its plugins.
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ }
+
+ directive = ShellCompDirectiveNoFileComp
+ if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
+ // If there is a single completion, the shell usually adds a space
+ // after the completion. We don't want that if the flag ends with an =
+ directive = ShellCompDirectiveNoSpace
+ }
+
+ if !finalCmd.DisableFlagParsing {
+ // If DisableFlagParsing==false, we have completed the flags as known by Cobra;
+ // we can return what we found.
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
+ // let the logic continue to see if ValidArgsFunction needs to be called.
+ return finalCmd, completions, directive, nil
+ }
+ } else {
+ directive = ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
+
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
+ }
+ }
+ }
+
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
+ }
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
+ }
+
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn CompletionFunc
+ if flag != nil && flagCompletion {
+ flagCompletionMutex.RLock()
+ completionFn = flagCompletionFunctions[flag]
+ flagCompletionMutex.RUnlock()
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn != nil {
+ // Go custom completion defined for this flag or command.
+ // Call the registered completion function to get the completions.
+ var comps []Completion
+ comps, directive = completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ }
+
+ return finalCmd, completions, directive, nil
+}
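+
+// The logic above defers to two application-provided hooks: the Command's
+// ValidArgsFunction field for positional arguments, and the function registered
+// with RegisterFlagCompletionFunc for flag values. A minimal sketch of wiring
+// them up (command and flag names below are hypothetical):
+//
+//	cmd := &cobra.Command{
+//		Use: "get [resource]",
+//		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+//			return []string{"pods", "nodes"}, cobra.ShellCompDirectiveNoFileComp
+//		},
+//	}
+//	cmd.Flags().String("output", "", "output format")
+//	_ = cmd.RegisterFlagCompletionFunc("output", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+//		return []string{"json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
+//	})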
+
+func helpOrVersionFlagPresent(cmd *Command) bool {
+ if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil &&
+ len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed {
+ return true
+ }
+ if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil &&
+ len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed {
+ return true
+ }
+ return false
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion {
+ if nonCompletableFlag(flag) {
+ return []Completion{}
+ }
+
+ var completions []Completion
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, CompletionWithDesc(flagName, flag.Usage))
+
+ // Why suggest both long forms: --flag and --flag= ?
+ // This forces the user to *always* have to type either an = or a space after the flag name.
+ // Let's be nice and avoid making users have to do that.
+ // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
+ // The = form will still work, we just won't suggest it.
+ // This also makes the list of suggested flags shorter as we avoid all the = forms.
+ //
+ // if len(flag.NoOptDefVal) == 0 {
+ // // Flag requires a value, so it can be suffixed with =
+ // flagName += "="
+ // completions = append(completions, CompletionWithDesc(flagName, flag.Usage))
+ // }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, CompletionWithDesc(flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func completeRequireFlags(finalCmd *Command, toComplete string) []Completion {
+ var completions []Completion
+
+ doCompleteRequiredFlags := func(flag *pflag.Flag) {
+ if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
+ if !flag.Changed {
+ // If the flag is not already present, we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ if finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ return nil, args, lastArg, nil
+ }
+
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ orgLastArg := lastArg
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as that function
+ // requires the flag name to be complete
+ if len(lastArg) > 0 && lastArg[0] == '-' {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ // Flag with an =
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ // Normal flag completion
+ return nil, args, lastArg, nil
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command; the interspersed option might be set, so return the original args
+ return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed we were dealing with a two-word flag but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+// InitDefaultCompletionCmd adds a default 'completion' command to c.
+// This function will do nothing if any of the following is true:
+// 1- the feature has been explicitly disabled by the program,
+// 2- c has no subcommands (to avoid creating one),
+// 3- c already has a 'completion' command provided by the program.
+func (c *Command) InitDefaultCompletionCmd(args ...string) {
+ if c.CompletionOptions.DisableDefaultCmd {
+ return
+ }
+
+ for _, cmd := range c.commands {
+ if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
+ // A completion command is already available
+ return
+ }
+ }
+
+ haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
+
+ // Special case to know if there are sub-commands or not.
+ hasSubCommands := false
+ for _, cmd := range c.commands {
+ if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName {
+ // We found a real sub-command (not 'help' or '__complete')
+ hasSubCommands = true
+ break
+ }
+ }
+
+ completionCmd := &Command{
+ Use: compCmdName,
+ Short: "Generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ Hidden: c.CompletionOptions.HiddenDefaultCmd,
+ GroupID: c.completionCommandGroupID,
+ }
+ c.AddCommand(completionCmd)
+
+ if !hasSubCommands {
+ // If the 'completion' command will be the only sub-command,
+ // we only create it if it is actually being called.
+ // This avoids breaking programs that would suddenly find themselves with
+ // a subcommand, which would prevent them from accepting arguments.
+ // We also create the 'completion' command if the user is triggering
+ // shell completion for it (prog __complete completion '')
+ subCmd, cmdArgs, err := c.Find(args)
+ if err != nil || subCmd.Name() != compCmdName &&
+ !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) {
+ // The completion command is not being called or being completed so we remove it.
+ c.RemoveCommand(completionCmd)
+ return
+ }
+ }
+
+ out := c.OutOrStdout()
+ noDesc := c.CompletionOptions.DisableDescriptions
+ shortDesc := "Generate the autocompletion script for %s"
+ bash := &Command{
+ Use: "bash",
+ Short: fmt.Sprintf(shortDesc, "bash"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+
+#### macOS:
+
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ DisableFlagsInUseLine: true,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenBashCompletionV2(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ zsh := &Command{
+ Use: "zsh",
+ Short: fmt.Sprintf(shortDesc, "zsh"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+#### macOS:
+
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenZshCompletionNoDesc(out)
+ }
+ return cmd.Root().GenZshCompletion(out)
+ },
+ }
+ if haveNoDescFlag {
+ zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ fish := &Command{
+ Use: "fish",
+ Short: fmt.Sprintf(shortDesc, "fish"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ %[1]s completion fish | source
+
+To load completions for every new session, execute once:
+
+ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenFishCompletion(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ powershell := &Command{
+ Use: "powershell",
+ Short: fmt.Sprintf(shortDesc, "powershell"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ %[1]s completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+
+ },
+ }
+ if haveNoDescFlag {
+ powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ completionCmd.AddCommand(bash, zsh, fish, powershell)
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ WriteStringAndCheck(f, msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
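+
+// A minimal sketch of using these helpers from inside a completion function to
+// trace what the shell sent; the output goes to whatever file the user exports
+// in BASH_COMP_DEBUG_FILE before triggering completion:
+//
+//	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+//		cobra.CompDebugln(fmt.Sprintf("args=%v toComplete=%q", args, toComplete), false)
+//		return nil, cobra.ShellCompDirectiveDefault
+//	},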
+
+// These values should not be changed: users will be using them explicitly.
+const (
+ configEnvVarGlobalPrefix = "COBRA"
+ configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS"
+)
+
+var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
+
+// configEnvVar returns the name of the program-specific configuration environment
+// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func configEnvVar(name, suffix string) string {
+ // This format should not be changed: users will be using it explicitly.
+ v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
+ v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_")
+ return v
+}
+
+// getEnvConfig returns the value of the configuration environment variable
+// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// If the value is empty or not set, the value of the environment variable
+// COBRA_<SUFFIX> is returned instead.
+func getEnvConfig(cmd *Command, suffix string) string {
+ v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix))
+ if v == "" {
+ v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix))
+ }
+ return v
+}
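+
+// For example, following the upper-casing and substitution rules above, a
+// hypothetical root command named "my-app" resolves its configuration variable as:
+//
+//	configEnvVar("my-app", configEnvVarSuffixDescriptions) // "MY_APP_COMPLETION_DESCRIPTIONS"
+//
+// with getEnvConfig falling back to the COBRA-prefixed variant
+// (COBRA_COMPLETION_DESCRIPTIONS) when the program-specific one is unset.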
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 0000000..12d61b6
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,292 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+function __%[1]s_debug
+ set -l file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
+
+ # Extract all args except the last one
+ set -l args (commandline -opc)
+ # Extract the last arg and escape it in case it is a space
+ set -l lastArg (string escape -- (commandline -ct))
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
+
+ __%[1]s_debug "Calling $requestComp"
+ set -l results (eval $requestComp 2> /dev/null)
+
+ # Some programs may output extra empty lines after the directive.
+ # Let's ignore them or else it will break completion.
+ # Ref: https://github.com/spf13/cobra/issues/1279
+ for line in $results[-1..1]
+ if test (string trim -- $line) = ""
+ # Found an empty line, remove it
+ set results $results[1..-2]
+ else
+ # Found non-empty line, we have our proper output
+ break
+ end
+ end
+
+ set -l comps $results[1..-2]
+ set -l directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result
+function __%[1]s_perform_completion_once
+ __%[1]s_debug "Starting __%[1]s_perform_completion_once"
+
+ if test -n "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
+ return 0
+ end
+
+ set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completions, probably due to a failure"
+ return 1
+ end
+
+ __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
+ return 0
+end
+
+# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
+function __%[1]s_clear_perform_completion_once_result
+ __%[1]s_debug ""
+ __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
+ set --erase __%[1]s_perform_completion_once_result
+ __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result"
+end
+
+function __%[1]s_requires_order_preservation
+ __%[1]s_debug ""
+ __%[1]s_debug "========= checking if order preservation is required =========="
+
+ __%[1]s_perform_completion_once
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Error determining if order preservation is required"
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveKeepOrder %[9]d
+ set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
+ __%[1]s_debug "Keeporder is: $keeporder"
+
+ if test $keeporder -ne 0
+ __%[1]s_debug "This does require order preservation"
+ return 0
+ end
+
+ __%[1]s_debug "This doesn't require order preservation"
+ return 1
+end
+
+
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
+function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
+ # Start fresh
+ set --erase __%[1]s_comp_results
+
+ __%[1]s_perform_completion_once
+ __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
+
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveError %[4]d
+ set -l shellCompDirectiveNoSpace %[5]d
+ set -l shellCompDirectiveNoFileComp %[6]d
+ set -l shellCompDirectiveFilterFileExt %[7]d
+ set -l shellCompDirectiveFilterDirs %[8]d
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ if test $filefilter -eq 1; or test $dirfilter -eq 1
+ __%[1]s_debug "File extension filtering or directory filtering not supported"
+ # Do full file completion instead
+ return 1
+ end
+
+ set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on prefix as the completions we have received
+ # may not already be filtered so as to allow fish to match on different
+ # criteria than the prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set -l prefix (commandline -t | string escape --style=regex)
+ __%[1]s_debug "prefix: $prefix"
+
+ set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set -l numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # We must first split on \t to get rid of the descriptions to be
+ # able to check what the actual completion will be.
+ # We don't need descriptions anyway since there is only a single
+ # real completion which the shell will expand immediately.
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
+
+ # Fish won't add a space if the completion ends with any
+ # of the following characters: @=/:.,
+ set -l lastChar (string sub -s -1 -- $split)
+ if not string match -r -q "[@=/:.,]" -- "$lastChar"
+ # In other cases, to support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
+ end
+
+ return 0
+end
+
+# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
+# so we can properly delete any completions provided by another script.
+# Only do this if the program can be found, or else fish may print some errors; besides,
+# the existing completions will only be loaded if the program can be found.
+if type -q "%[2]s"
+ # The space after the program name is essential to trigger completion for the program
+ # and not completion of the program name itself.
+ # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+ complete --do-complete "%[2]s " > /dev/null 2>&1
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them.
+complete -c %[2]s -e
+
+# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
+complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# otherwise we use the -k flag
+complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+`, nameForVar, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
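+
+// A minimal sketch of writing the script to the conventional fish completions
+// directory; rootCmd stands for the application's root command, and the target
+// path mirrors the default completion command's help text rather than anything
+// Cobra enforces:
+//
+//	home, _ := os.UserHomeDir()
+//	path := filepath.Join(home, ".config", "fish", "completions", rootCmd.Name()+".fish")
+//	if err := rootCmd.GenFishCompletionFile(path, true); err != nil {
+//		log.Fatal(err)
+//	}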
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 0000000..560612f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,290 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set"
+ oneRequiredAnnotation = "cobra_annotation_one_required"
+ mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
+// if the command is invoked without at least one flag from the given set of flags.
+func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each call appends a single new entry; this allows a flag to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
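+
+// A minimal sketch of how the three helpers above are typically combined
+// (flag names are hypothetical); the grouping is then checked by
+// ValidateFlagGroups when the command is executed:
+//
+//	cmd.Flags().String("username", "", "user name")
+//	cmd.Flags().String("password", "", "password")
+//	cmd.Flags().String("token", "", "API token")
+//	cmd.MarkFlagsRequiredTogether("username", "password") // both or neither
+//	cmd.MarkFlagsOneRequired("password", "token")         // at least one of the two
+//	cmd.MarkFlagsMutuallyExclusive("password", "token")   // but not both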
+
+// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) ValidateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+ // groupStatus format is the list of flags as a unique ID,
+ // then a map of each flag name and whether it is set or not.
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = make(map[string]bool, len(flagnames))
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateOneRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) >= 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList)
+ }
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when none of the flags in a one-required group are present, all flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If none of the flags of a one-required group are present, we make all the flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range oneRequiredGroupStatus {
+ isSet := false
+
+ for _, isSet = range flagnameAndStatus {
+ if isSet {
+ break
+ }
+ }
+
+ // None of the flags of the group are set, mark all flags in the group
+ // as required
+ if !isSet {
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 0000000..746dcb9
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,350 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
+
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
+}
+
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
+}
+
+[scriptblock]${__%[2]sCompleterBlock} = {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+ # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[4]d
+ $ShellCompDirectiveNoSpace=%[5]d
+ $ShellCompDirectiveNoFileComp=%[6]d
+ $ShellCompDirectiveFilterFileExt=%[7]d
+ $ShellCompDirectiveFilterDirs=%[8]d
+ $ShellCompDirectiveKeepOrder=%[9]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+
+ $RequestComp="$Program %[3]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+ # we cannot use $WordToComplete because it
+ # has the wrong values if the cursor was moved
+ # so use the last argument
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ # PowerShell 7.2+ changed the way arguments are passed to executables,
+ # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
+ if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+ ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+ (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+ $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ } else {
+ $RequestComp="$RequestComp" + ' ""'
+ }
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ ${env:%[10]s}=0
+
+ # Call the command, store the output in $Out, and redirect stderr and stdout to null.
+ # $Out is an array containing one line per element
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ [Array]$Values = $Out | ForEach-Object {
+ #Split the output in name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ New-Object -TypeName PSCustomObject -Property @{
+ Name = "$Name"
+ Description = "$Description"
+ }
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
+
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+
+ # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ # we sort the values in ascending order by name if keep order isn't passed
+ if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+ $Values = $Values | Sort-Object -Property Name
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+ # store temporary because switch will overwrite $_
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+ # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+ if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+ [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ } else {
+ $CompletionText
+ }
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ $CompletionText = "$($comp.Name)$Description"
+ if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+ [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ } else {
+ $CompletionText
+ }
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+
+ $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+ if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+ [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ } else {
+ $CompletionText
+ }
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+ # Like MenuComplete but we don't want to add a space here because
+ # the user needs to press space anyway to get the completion.
+ # Description will not be shown because that's not possible with TabCompleteNext
+
+ $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars)
+ if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+ [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ } else {
+ $CompletionText
+ }
+ }
+ }
+
+ }
+}
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock}
+`, name, nameForVar, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
+}
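+
+// A minimal sketch of emitting the script from application code (rootCmd stands
+// for the application's root command), e.g. so users can pipe it through
+// Invoke-Expression as shown in the default completion command's help text:
+//
+//	if err := rootCmd.GenPowerShellCompletionWithDesc(os.Stdout); err != nil {
+//		return err
+//	}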
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 0000000..b035742
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,98 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
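+
+// A minimal sketch combining these markers on a hypothetical flag set:
+//
+//	cmd.Flags().String("config", "", "path to the config file")
+//	cmd.Flags().String("out", "", "output directory")
+//	_ = cmd.MarkFlagRequired("config")                // prioritized in completion, required at runtime
+//	_ = cmd.MarkFlagFilename("config", "yaml", "yml") // only *.yaml / *.yml files suggested
+//	_ = cmd.MarkFlagDirname("out")                    // only directories suggested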
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 0000000..1856e4c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,308 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// GenZshCompletionFile generates zsh completion file including descriptions.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ return c.genZshCompletionFile(filename, true)
+}
+
+// GenZshCompletion generates zsh completion file including descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ return c.genZshCompletion(w, true)
+}
+
+// GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
+func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
+ return c.genZshCompletionFile(filename, false)
+}
+
+// GenZshCompletionNoDesc generates zsh completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
+ return c.genZshCompletion(w, false)
+}
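+
+// A hedged usage sketch: from application code, the generated script is usually
+// written to a file or to standard output (assuming a root *cobra.Command named
+// rootCmd):
+//
+//	if err := rootCmd.GenZshCompletion(os.Stdout); err != nil {
+//		fmt.Fprintln(os.Stderr, err)
+//	}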
+
+// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+// not consistent with Bash completion. It has therefore been disabled.
+// Instead, when no other completion is specified, file completion is done by
+// default for every argument. One can disable file completion on a per-argument
+// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+// To achieve file extension filtering, one can use ValidArgsFunction and
+// ShellCompDirectiveFilterFileExt.
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ return nil
+}
+
+// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+// been disabled.
+// To achieve the same behavior across all shells, one can use
+// ValidArgs (for the first argument only) or ValidArgsFunction for
+// any argument (can include the first one also).
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ return nil
+}
+
+func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genZshCompletion(outFile, includeDesc)
+}
+
+func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genZshComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
+compdef _%[1]s %[1]s
+
+# zsh completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ local file="$BASH_COMP_DEBUG_FILE"
+ if [[ -n ${file} ]]; then
+ echo "$*" >> "${file}"
+ fi
+}
+
+_%[1]s()
+{
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
+ local -a completions
+
+ __%[1]s_debug "\n========= starting completion logic =========="
+ __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CURRENT location, so we need
+ # to truncate the command-line ($words) up to the $CURRENT location.
+ # (We cannot use $CURSOR as its value does not work when a command is an alias.)
+ words=("${=words[1,CURRENT]}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ lastParam=${words[-1]}
+ lastChar=${lastParam[-1]}
+ __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
+
+ # For zsh, when completing a flag with an = (e.g., %[1]s -n=)
+ # completions must be prefixed with the flag
+ setopt local_options BASH_REMATCH
+ if [[ "${lastParam}" =~ '-.*=' ]]; then
+ # We are dealing with a flag with an =
+ flagPrefix="-P ${BASH_REMATCH}"
+ fi
+
+ # Prepare the command to obtain completions
+ requestComp="${words[1]} %[2]s ${words[2,-1]}"
+ if [ "${lastChar}" = "" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "About to call: eval ${requestComp}"
+
+ # Use eval to handle any environment variables and such
+ out=$(eval ${requestComp} 2>/dev/null)
+ __%[1]s_debug "completion output: ${out}"
+
+ # Extract the directive integer following a : from the last line
+ local lastLine
+ while IFS='\n' read -r line; do
+ lastLine=${line}
+ done < <(printf "%%s\n" "${out[@]}")
+ __%[1]s_debug "last line: ${lastLine}"
+
+ if [ "${lastLine[1]}" = : ]; then
+ directive=${lastLine[2,-1]}
+ # Remove the directive including the : and the newline
+ local suffix
+ (( suffix=${#lastLine}+2))
+ out=${out[1,-$suffix]}
+ else
+ # There is no directive specified. Leave $out as is.
+ __%[1]s_debug "No directive found. Setting to default"
+ directive=0
+ fi
+
+ __%[1]s_debug "directive: ${directive}"
+ __%[1]s_debug "completions: ${out}"
+ __%[1]s_debug "flagPrefix: ${flagPrefix}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ __%[1]s_debug "Completion received error. Ignoring completions."
+ return
+ fi
+
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
+ while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
+ if [ -n "$comp" ]; then
+ # If requested, completions are returned with a description.
+ # The description is preceded by a TAB character.
+ # For zsh's _describe, we need to use a : instead of a TAB.
+ # We first need to escape any : as part of the completion itself.
+ comp=${comp//:/\\:}
+
+ local tab="$(printf '\t')"
+ comp=${comp//$tab/:}
+
+ __%[1]s_debug "Adding completion: ${comp}"
+ completions+=${comp}
+ lastComp=$comp
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
+ if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+ __%[1]s_debug "Activating keep order."
+ keepOrder="-V"
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local filteringCmd
+ filteringCmd='_files'
+ for filter in ${completions[@]}; do
+ if [ ${filter[1]} != '*' ]; then
+ # zsh requires a glob pattern to do file filtering
+ filter="\*.$filter"
+ fi
+ filteringCmd+=" -g $filter"
+ done
+ filteringCmd+=" ${flagPrefix}"
+
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ _arguments '*:filename:'"$filteringCmd"
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ subdir="${completions[1]}"
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "${subdir}" >/dev/null 2>&1
+ else
+ __%[1]s_debug "Listing directories in ."
+ fi
+
+ local result
+ _arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
+ if [ -n "$subdir" ]; then
+ popd >/dev/null 2>&1
+ fi
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
+ else
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
+ fi
+ fi
+}
+
+# don't run the completion function when being source-ed or eval-ed
+if [ "$funcstack[1]" = "_%[1]s" ]; then
+ _%[1]s
+fi
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
diff --git a/vendor/github.com/spf13/pflag/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig
new file mode 100644
index 0000000..4492e9f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml
new file mode 100644
index 0000000..b274f24
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.golangci.yaml
@@ -0,0 +1,4 @@
+linters:
+ disable-all: true
+ enable:
+ - nolintlint
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..00d04cb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..7eacc5b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[Build Status](https://travis-ci.org/spf13/pflag)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/pflag)
+[GoDoc](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
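+For example (a small, hedged sketch that assumes the `import flag "github.com/spf13/pflag"` alias shown above):
+
+``` go
+flag.Parse()
+fmt.Println("positional arguments:", flag.Args()) // everything left over after flag parsing
+for i := 0; i < flag.NArg(); i++ {
+    fmt.Println("arg", i, "=", flag.Arg(i)) // or access them one at a time
+}
+```
+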
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
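+
+A hedged sketch of an independent flag set for a hypothetical `serve` subcommand:
+
+``` go
+serveFlags := flag.NewFlagSet("serve", flag.ExitOnError)
+port := serveFlags.IntP("port", "p", 8080, "port to listen on")
+serveFlags.Parse(os.Args[2:]) // with ExitOnError, parse failures call os.Exit(2)
+fmt.Println("listening on port", *port)
+```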
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
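+
+For instance (a sketch, again assuming the `flag` import alias from the Usage section, plus `time` and `fmt`):
+
+``` go
+timeout := flag.Duration("timeout", 5*time.Second, "request timeout")
+flag.Parse()          // accepts e.g. --timeout=90s or --timeout=1m30s
+fmt.Println(*timeout) // prints the parsed time.Duration
+```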
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here; it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage message.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool return the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..3731370
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
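+// (For example, a hypothetical --checksum=48656C6C6F would decode to the bytes "Hello".)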
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex return the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 return the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..a0b2679
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount return the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar only take a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar only take a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
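+// A hedged usage sketch from client code (assuming the package is imported as
+// pflag): "prog -vvv" leaves *verbosity == 3.
+//
+//	verbosity := pflag.CountP("verbose", "v", "increase verbosity")
+//	pflag.Parse()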
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration return the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..badadda
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string.
+// The argument p points to a duration[] variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..7c058de
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1246 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagval, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+ UnknownFlags bool
+}
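+
+// For example (illustrative; "fs" and the flag set name are placeholders):
+// tolerate unknown flags instead of failing the parse.
+//
+//	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
+//	fs.ParseErrorsWhitelist.UnknownFlags = true
+//	fs.Parse(os.Args[1:])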
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+	// SortFlags indicates whether flags should be sorted in
+	// help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use Output() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // If the user set the value (or if left to default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string gives a message suggesting what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string gives a message suggesting what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
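+
+// Example (illustrative, not part of the upstream API): a custom Value that
+// collects repeated --tag arguments into a slice. It is shown only to
+// document the Value interface above; the type and flag names are placeholders.
+//
+//	type tagsValue []string
+//
+//	func (t *tagsValue) String() string     { return strings.Join(*t, ",") }
+//	func (t *tagsValue) Set(s string) error { *t = append(*t, s); return nil }
+//	func (t *tagsValue) Type() string       { return "tags" }
+//
+//	var tags tagsValue
+//	fs.Var(&tags, "tag", "may be repeated")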
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any later lookup of a flag
+// name will be translated as well. This makes it possible to create a flag named
+// "getURL" and have it translated to "geturl". A user could then pass "--getUrl",
+// which would also be translated to "geturl", and everything will work.
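+//
+// For example (illustrative), a caller could make '_' and '-' interchangeable
+// in flag names like this:
+//
+//	fs.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+//		return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
+//	})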
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+// Output returns the destination for usage and error messages. os.Stderr is returned if
+// output was not set or was set to nil.
+func (f *FlagSet) Output() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// Name returns the name of the flag set.
+func (f *FlagSet) Name() string {
+ return f.name
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprintf(f.Output(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
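+//
+// For example (illustrative; the flag names are placeholders):
+//
+//	fs.MarkDeprecated("badflag", "please use --good-flag instead")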
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.Output(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == ""
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+		case "<nil>":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.Output(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprintf(f.Output(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprintf(f.Output(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.Output(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
+ if len(arguments) < 0 {
+ return nil
+ }
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 0000000..caa3527
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetFloat32Slice returns the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a []float32 flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 0000000..85bf307
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetFloat64Slice returns the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a []float64 flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
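+//
+// For example (illustrative), a program that still registers some flags with
+// the standard library flag package could adopt them like this:
+//
+//	import goflag "flag"
+//
+//	goflag.Bool("legacy", false, "registered via the standard flag package")
+//	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+//	pflag.Parse()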
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 0000000..ff128ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines a []int32 flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
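
The slice variants (int32Slice here, int64Slice and intSlice below) add comma splitting on top of the scalar flags: the first occurrence of the flag replaces the default, later occurrences append, and Append/Replace/GetSlice satisfy pflag's SliceValue interface. A small sketch of that behaviour (flag name and values are illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        ports := fs.Int32Slice("ports", []int32{80}, "ports to listen on")

        // The first --ports replaces the default [80]; the second appends.
        if err := fs.Parse([]string{"--ports=8080,8443", "--ports=9090"}); err != nil {
            panic(err)
        }
        fmt.Println(*ports) // [8080 8443 9090]
    }
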
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 0000000..2546463
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines a []int64 flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..e71c39d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines a intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..06b8bcb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ if s == "" {
+ return nil
+ }
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines an net.IP flag with specified name, default value, and usage string.
+// The argument p points to an net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines an net.IP flag with specified name, default value, and usage string.
+// The argument p points to an net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines an net.IP flag with specified name, default value, and usage string.
+// The return value is the address of an net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines an net.IP flag with specified name, default value, and usage string.
+// The return value is the address of an net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
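
The net.IP flag stores an address parsed with net.ParseIP, so a value Go cannot parse is rejected at parse time. A short sketch (flag name and addresses are illustrative):

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

        var bind net.IP
        fs.IPVar(&bind, "bind", net.ParseIP("127.0.0.1"), "address to bind to")

        // A valid address is stored directly into bind.
        if err := fs.Parse([]string{"--bind", "10.0.0.7"}); err != nil {
            panic(err)
        }
        fmt.Println(bind) // 10.0.0.7

        // A value that net.ParseIP rejects (e.g. "not-an-ip") would instead make
        // Parse return the "failed to parse IP" error produced by Set.
    }
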
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..775faae
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
+ return net.ParseIP(strings.TrimSpace(val)), nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to an net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to an net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
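
ParseIPv4Mask is exported and accepts two spellings of the same mask: the dotted-quad form and the eight-hex-digit form that net.IPMask.String produces; anything else yields nil, which ipMaskValue.Set turns into an error. A quick sketch:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // Dotted-quad and hex forms parse to the same 4-byte mask.
        fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
        fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00

        // Unparseable input returns nil.
        fmt.Println(pflag.ParseIPv4Mask("not-a-mask") == nil) // true
    }
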
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to an net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to an net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines an net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines an net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of an net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go
new file mode 100644
index 0000000..6b541aa
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipNetSlice Value
+type ipNetSliceValue struct {
+ value *[]net.IPNet
+ changed bool
+}
+
+func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue {
+ ipnsv := new(ipNetSliceValue)
+ ipnsv.value = p
+ *ipnsv.value = val
+ return ipnsv
+}
+
+// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag.
+// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended.
+func (s *ipNetSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IPNet, 0, len(ipNetStrSlice))
+ for _, ipNetStr := range ipNetStrSlice {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr))
+ if err != nil {
+ return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr)
+ }
+ out = append(out, *n)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipNetSliceValue) Type() string {
+ return "ipNetSlice"
+}
+
+// String defines a "native" format for this net.IPNet slice flag value.
+func (s *ipNetSliceValue) String() string {
+
+ ipNetStrSlice := make([]string, len(*s.value))
+ for i, n := range *s.value {
+ ipNetStrSlice[i] = n.String()
+ }
+
+ out, _ := writeAsCSV(ipNetStrSlice)
+ return "[" + out + "]"
+}
+
+func ipNetSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IPNet{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IPNet, len(ss))
+ for i, sval := range ss {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err != nil {
+ return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval)
+ }
+ out[i] = *n
+ }
+ return out, nil
+}
+
+// GetIPNetSlice returns the []net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv)
+ if err != nil {
+ return []net.IPNet{}, err
+ }
+ return val.([]net.IPNet), nil
+}
+
+// IPNetSliceVar defines a ipNetSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+ f.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+ f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of that flag.
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+ p := []net.IPNet{}
+ f.IPNetSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+ p := []net.IPNet{}
+ f.IPNetSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of the flag.
+func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+ return CommandLine.IPNetSliceP(name, "", value, usage)
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+ return CommandLine.IPNetSliceP(name, shorthand, value, usage)
+}
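
The []net.IPNet flag reads each occurrence as a CSV list (with surrounding quotes stripped) and runs every entry through net.ParseCIDR; as with the other slice flags, the first occurrence replaces the default and later ones append. An illustrative sketch:

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

        var allowed []net.IPNet
        fs.IPNetSliceVar(&allowed, "allow", nil, "CIDR ranges to allow")

        // Comma-separated CIDRs in one occurrence, plus an appended occurrence.
        if err := fs.Parse([]string{"--allow=10.0.0.0/8,192.168.0.0/16", "--allow=172.16.0.0/12"}); err != nil {
            panic(err)
        }
        for _, n := range allowed {
            fmt.Println(n.String()) // 10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12
        }
    }
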
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..d1ff0a9
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,125 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+ out := make([]string, len(val))
+ for i, d := range val {
+ out[i] = d
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..3cb2e69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
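
Taken together with string_array.go above, these two value types differ only in how repeated occurrences are tokenised: StringArray keeps each occurrence verbatim, while StringSlice CSV-splits it. Below is a minimal illustrative sketch of that difference; the flag set, flag names, and arguments are hypothetical and not part of this change.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	arr := fs.StringArray("tag", nil, "repeatable, kept verbatim")
	sl := fs.StringSlice("ss", nil, "repeatable, CSV-split")

	// Error ignored for brevity; a real caller would handle it.
	_ = fs.Parse([]string{"--tag=a,b", "--tag=c", "--ss=a,b", "--ss=c"})

	fmt.Println(*arr) // [a,b c] -- commas preserved
	fmt.Println(*sl)  // [a b c] -- commas split
}

Under that assumption, the comma-preserving StringArray form is the safer choice for values that may themselves contain commas.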
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
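
The stringToInt value above accepts comma-separated key=value pairs (the "Format: a=1,b=2" noted on the Set method) and merges repeated occurrences into a single map. A short hedged sketch of that behaviour follows; the flag name and values are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	limits := fs.StringToInt("limit", nil, "per-key integer limits")

	// Error ignored for brevity; a non-integer value would make Parse fail.
	_ = fs.Parse([]string{"--limit=a=1,b=2", "--limit=b=3"})

	fmt.Println(*limits) // map[a:1 b:3] -- later occurrences overwrite existing keys
}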
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 0000000..a807a04
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..5fa9248
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE
new file mode 100644
index 0000000..20dcf51
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
new file mode 100644
index 0000000..fe4ecf5
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// CGroup represents the data structure for a Linux control group.
+type CGroup struct {
+ path string
+}
+
+// NewCGroup returns a new *CGroup from a given path.
+func NewCGroup(path string) *CGroup {
+ return &CGroup{path: path}
+}
+
+// Path returns the path of the CGroup*.
+func (cg *CGroup) Path() string {
+ return cg.path
+}
+
+// ParamPath returns the path of the given cgroup param under itself.
+func (cg *CGroup) ParamPath(param string) string {
+ return filepath.Join(cg.path, param)
+}
+
+// readFirstLine reads the first line from a cgroup param file.
+func (cg *CGroup) readFirstLine(param string) (string, error) {
+ paramFile, err := os.Open(cg.ParamPath(param))
+ if err != nil {
+ return "", err
+ }
+ defer paramFile.Close()
+
+ scanner := bufio.NewScanner(paramFile)
+ if scanner.Scan() {
+ return scanner.Text(), nil
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ return "", io.ErrUnexpectedEOF
+}
+
+// readInt parses the first line from a cgroup param file as int.
+func (cg *CGroup) readInt(param string) (int, error) {
+ text, err := cg.readFirstLine(param)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(text)
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
new file mode 100644
index 0000000..e89f543
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+const (
+ // _cgroupFSType is the Linux CGroup file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupFSType = "cgroup"
+ // _cgroupSubsysCPU is the CPU CGroup subsystem.
+ _cgroupSubsysCPU = "cpu"
+ // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+ _cgroupSubsysCPUAcct = "cpuacct"
+ // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+ _cgroupSubsysCPUSet = "cpuset"
+ // _cgroupSubsysMemory is the Memory CGroup subsystem.
+ _cgroupSubsysMemory = "memory"
+
+ // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
+ // parameter.
+ _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
+ // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
+ // parameter.
+ _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
+)
+
+const (
+ _procPathCGroup = "/proc/self/cgroup"
+ _procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from the given `mountinfo` and `cgroup`
+// files for some process under the `/proc` file system (see also proc(5) for
+// more information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+ cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ cgroups := make(CGroups)
+ newMountPoint := func(mp *MountPoint) error {
+ if mp.FSType != _cgroupFSType {
+ return nil
+ }
+
+ for _, opt := range mp.SuperOptions {
+ subsys, exists := cgroupSubsystems[opt]
+ if !exists {
+ continue
+ }
+
+ cgroupPath, err := mp.Translate(subsys.Name)
+ if err != nil {
+ return err
+ }
+ cgroups[opt] = NewCGroup(cgroupPath)
+ }
+
+ return nil
+ }
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return nil, err
+ }
+ return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+ return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
+// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
+// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, false, nil)`.
+func (cg CGroups) CPUQuota() (float64, bool, error) {
+ cpuCGroup, exists := cg[_cgroupSubsysCPU]
+ if !exists {
+ return -1, false, nil
+ }
+
+ cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
+ if defined := cfsQuotaUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
+ if defined := cfsPeriodUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
new file mode 100644
index 0000000..7855606
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period
+ // parameter.
+ _cgroupv2CPUMax = "cpu.max"
+ // _cgroupFSType is the Linux CGroup-V2 file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupv2FSType = "cgroup2"
+
+ _cgroupv2MountPoint = "/sys/fs/cgroup"
+
+ _cgroupV2CPUMaxDefaultPeriod = 100000
+ _cgroupV2CPUMaxQuotaMax = "max"
+)
+
+const (
+ _cgroupv2CPUMaxQuotaIndex = iota
+ _cgroupv2CPUMaxPeriodIndex
+)
+
+// ErrNotV2 indicates that the system is not using cgroups2.
+var ErrNotV2 = errors.New("not using cgroups2")
+
+// CGroups2 provides access to cgroups data for systems using cgroups2.
+type CGroups2 struct {
+ mountPoint string
+ groupPath string
+ cpuMaxFile string
+}
+
+// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process.
+//
+// This returns ErrNotV2 if the system is not using cgroups2.
+func NewCGroups2ForCurrentProcess() (*CGroups2, error) {
+ return newCGroups2From(_procPathMountInfo, _procPathCGroup)
+}
+
+func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) {
+ isV2, err := isCGroupV2(mountInfoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isV2 {
+ return nil, ErrNotV2
+ }
+
+ subsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find v2 subsystem by looking for the `0` id
+ var v2subsys *CGroupSubsys
+ for _, subsys := range subsystems {
+ if subsys.ID == 0 {
+ v2subsys = subsys
+ break
+ }
+ }
+
+ if v2subsys == nil {
+ return nil, ErrNotV2
+ }
+
+ return &CGroups2{
+ mountPoint: _cgroupv2MountPoint,
+ groupPath: v2subsys.Name,
+ cpuMaxFile: _cgroupv2CPUMax,
+ }, nil
+}
+
+func isCGroupV2(procPathMountInfo string) (bool, error) {
+ var (
+ isV2 bool
+ newMountPoint = func(mp *MountPoint) error {
+ isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint)
+ return nil
+ }
+ )
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return false, err
+ }
+
+ return isV2, nil
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller.
+// It is the quota divided by the period, both read from the cpu.max file.
+// If the quota is set to `max` (unlimited), it returns (-1, false, nil).
+func (cg *CGroups2) CPUQuota() (float64, bool, error) {
+ cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return -1, false, nil
+ }
+ return -1, false, err
+ }
+ defer cpuMaxParams.Close()
+
+ scanner := bufio.NewScanner(cpuMaxParams)
+ if scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 || len(fields) > 2 {
+ return -1, false, fmt.Errorf("invalid format")
+ }
+
+ if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
+ return -1, false, nil
+ }
+
+ max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ var period int
+ if len(fields) == 1 {
+ period = _cgroupV2CPUMaxDefaultPeriod
+ } else {
+ period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ if period == 0 {
+ return -1, false, errors.New("zero value for period is not allowed")
+ }
+ }
+
+ return float64(max) / float64(period), true, nil
+ }
+
+ if err := scanner.Err(); err != nil {
+ return -1, false, err
+ }
+
+ return 0, false, io.ErrUnexpectedEOF
+}
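
For reference, cgroup v2 exposes both values through a single `cpu.max` file whose first line is `<quota> <period>` (for example `200000 100000`), or `max <period>` when no limit is configured. The standalone sketch below mirrors the parsing above on a sample line; it is only an illustration of the file format, not part of the vendored package, and the helper name is made up.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCPUMax mirrors the cpu.max handling above for a single line:
// "<quota> <period>" (e.g. "200000 100000") or "max <period>".
func parseCPUMax(line string) (quota float64, defined bool, err error) {
	fields := strings.Fields(line)
	if len(fields) == 0 || len(fields) > 2 {
		return -1, false, fmt.Errorf("invalid cpu.max format: %q", line)
	}
	if fields[0] == "max" {
		return -1, false, nil // no quota configured
	}
	max, err := strconv.Atoi(fields[0])
	if err != nil {
		return -1, false, err
	}
	period := 100000 // default period when the second field is omitted
	if len(fields) == 2 {
		if period, err = strconv.Atoi(fields[1]); err != nil {
			return -1, false, err
		}
	}
	return float64(max) / float64(period), true, nil
}

func main() {
	q, ok, _ := parseCPUMax("200000 100000")
	fmt.Println(q, ok) // 2 true — i.e. a quota of two CPUs
}
```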
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
new file mode 100644
index 0000000..113555f
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package cgroups provides utilities to access Linux control group (CGroups)
+// parameters (CPU quota, for example) for a given process.
+package cgroups
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
new file mode 100644
index 0000000..94ac75a
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import "fmt"
+
+type cgroupSubsysFormatInvalidError struct {
+ line string
+}
+
+type mountPointFormatInvalidError struct {
+ line string
+}
+
+type pathNotExposedFromMountPointError struct {
+ mountPoint string
+ root string
+ path string
+}
+
+func (err cgroupSubsysFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
+}
+
+func (err mountPointFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
+}
+
+func (err pathNotExposedFromMountPointError) Error() string {
+ return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
new file mode 100644
index 0000000..f3877f7
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ _mountInfoSep = " "
+ _mountInfoOptsSep = ","
+ _mountInfoOptionalFieldsSep = "-"
+)
+
+const (
+ _miFieldIDMountID = iota
+ _miFieldIDParentID
+ _miFieldIDDeviceID
+ _miFieldIDRoot
+ _miFieldIDMountPoint
+ _miFieldIDOptions
+ _miFieldIDOptionalFields
+
+ _miFieldCountFirstHalf
+)
+
+const (
+ _miFieldOffsetFSType = iota
+ _miFieldOffsetMountSource
+ _miFieldOffsetSuperOptions
+
+ _miFieldCountSecondHalf
+)
+
+const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
+
+// MountPoint is the data structure for the mount points in
+// `/proc/$PID/mountinfo`. See also proc(5) for more information.
+type MountPoint struct {
+ MountID int
+ ParentID int
+ DeviceID string
+ Root string
+ MountPoint string
+ Options []string
+ OptionalFields []string
+ FSType string
+ MountSource string
+ SuperOptions []string
+}
+
+// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
+// returns a new *MountPoint.
+func NewMountPointFromLine(line string) (*MountPoint, error) {
+ fields := strings.Split(line, _mountInfoSep)
+
+ if len(fields) < _miFieldCountMin {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
+ if err != nil {
+ return nil, err
+ }
+
+ parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
+ if err != nil {
+ return nil, err
+ }
+
+ for i, field := range fields[_miFieldIDOptionalFields:] {
+ if field == _mountInfoOptionalFieldsSep {
+ // End of optional fields.
+ fsTypeStart := _miFieldIDOptionalFields + i + 1
+
+ // Now we know where the optional fields end, split the line again with a
+ // limit to avoid issues with spaces in super options as present on WSL.
+ fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf)
+ if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
+ miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
+ miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
+
+ return &MountPoint{
+ MountID: mountID,
+ ParentID: parentID,
+ DeviceID: fields[_miFieldIDDeviceID],
+ Root: fields[_miFieldIDRoot],
+ MountPoint: fields[_miFieldIDMountPoint],
+ Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
+ OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
+ FSType: fields[miFieldIDFSType],
+ MountSource: fields[miFieldIDMountSource],
+ SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
+ }, nil
+ }
+ }
+
+ return nil, mountPointFormatInvalidError{line}
+}
+
+// Translate converts an absolute path inside the *MountPoint's file system to
+// the host file system path in the mount namespace the *MountPoint belongs to.
+func (mp *MountPoint) Translate(absPath string) (string, error) {
+ relPath, err := filepath.Rel(mp.Root, absPath)
+
+ if err != nil {
+ return "", err
+ }
+ if relPath == ".." || strings.HasPrefix(relPath, "../") {
+ return "", pathNotExposedFromMountPointError{
+ mountPoint: mp.MountPoint,
+ root: mp.Root,
+ path: absPath,
+ }
+ }
+
+ return filepath.Join(mp.MountPoint, relPath), nil
+}
+
+// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
+// and yields parsed *MountPoint into newMountPoint.
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
+ mountInfoFile, err := os.Open(procPathMountInfo)
+ if err != nil {
+ return err
+ }
+ defer mountInfoFile.Close()
+
+ scanner := bufio.NewScanner(mountInfoFile)
+
+ for scanner.Scan() {
+ mountPoint, err := NewMountPointFromLine(scanner.Text())
+ if err != nil {
+ return err
+ }
+ if err := newMountPoint(mountPoint); err != nil {
+ return err
+ }
+ }
+
+ return scanner.Err()
+}
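
The parser above splits a mountinfo line on spaces and uses the `-` separator to locate the filesystem type, mount source, and super options. A hedged sketch of how a package-internal test might exercise it on a typical cgroup2 line follows; the sample line and test name are illustrative, and the package is internal to automaxprocs, so it cannot be imported from outside the module.

```go
package cgroups

import "testing"

func TestNewMountPointFromLine_CgroupV2(t *testing.T) {
	// A typical mountinfo line: the "-" separates the optional fields from
	// the filesystem type, mount source, and super options.
	line := "31 23 0:27 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - cgroup2 cgroup2 rw"

	mp, err := NewMountPointFromLine(line)
	if err != nil {
		t.Fatalf("parse failed: %v", err)
	}
	if mp.MountPoint != "/sys/fs/cgroup" || mp.FSType != "cgroup2" {
		t.Fatalf("unexpected mount point %q / fstype %q", mp.MountPoint, mp.FSType)
	}
}
```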
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
new file mode 100644
index 0000000..cddc3ea
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ _cgroupSep = ":"
+ _cgroupSubsysSep = ","
+)
+
+const (
+ _csFieldIDID = iota
+ _csFieldIDSubsystems
+ _csFieldIDName
+ _csFieldCount
+)
+
+// CGroupSubsys represents the data structure for entities in
+// `/proc/$PID/cgroup`. See also proc(5) for more information.
+type CGroupSubsys struct {
+ ID int
+ Subsystems []string
+ Name string
+}
+
+// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
+// the format of `/proc/$PID/cgroup`
+func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
+ fields := strings.SplitN(line, _cgroupSep, _csFieldCount)
+
+ if len(fields) != _csFieldCount {
+ return nil, cgroupSubsysFormatInvalidError{line}
+ }
+
+ id, err := strconv.Atoi(fields[_csFieldIDID])
+ if err != nil {
+ return nil, err
+ }
+
+ cgroup := &CGroupSubsys{
+ ID: id,
+ Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
+ Name: fields[_csFieldIDName],
+ }
+
+ return cgroup, nil
+}
+
+// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
+// and returns a new map[string]*CGroupSubsys.
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
+ cgroupFile, err := os.Open(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+ defer cgroupFile.Close()
+
+ scanner := bufio.NewScanner(cgroupFile)
+ subsystems := make(map[string]*CGroupSubsys)
+
+ for scanner.Scan() {
+ cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
+ if err != nil {
+ return nil, err
+ }
+ for _, subsys := range cgroup.Subsystems {
+ subsystems[subsys] = cgroup
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return subsystems, nil
+}
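
`/proc/$PID/cgroup` lines have the shape `<id>:<subsystems>:<name>`; on cgroup v2 there is a single entry with id `0` and an empty subsystem list, which is what `newCGroups2From` above looks for. A small package-internal example sketch (the sample lines are assumptions, not taken from a real host):

```go
package cgroups

import "fmt"

func ExampleNewCGroupSubsysFromLine() {
	// cgroup v1 entry: "<id>:<subsystems>:<name>"
	v1, _ := NewCGroupSubsysFromLine("3:cpu,cpuacct:/docker/abc123")
	fmt.Println(v1.ID, v1.Subsystems, v1.Name)

	// cgroup v2 entry: id 0 with an empty subsystem list
	v2, _ := NewCGroupSubsysFromLine("0::/user.slice")
	fmt.Println(v2.ID, v2.Name)

	// Output:
	// 3 [cpu cpuacct] /docker/abc123
	// 0 /user.slice
}
```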
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
new file mode 100644
index 0000000..f9057fd
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package runtime
+
+import (
+ "errors"
+
+ cg "go.uber.org/automaxprocs/internal/cgroups"
+)
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
+// If round == nil, DefaultRoundFunc is used.
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
+ if round == nil {
+ round = DefaultRoundFunc
+ }
+ cgroups, err := _newQueryer()
+ if err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ quota, defined, err := cgroups.CPUQuota()
+ if !defined || err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ maxProcs := round(quota)
+ if minValue > 0 && maxProcs < minValue {
+ return minValue, CPUQuotaMinUsed, nil
+ }
+ return maxProcs, CPUQuotaUsed, nil
+}
+
+type queryer interface {
+ CPUQuota() (float64, bool, error)
+}
+
+var (
+ _newCgroups2 = cg.NewCGroups2ForCurrentProcess
+ _newCgroups = cg.NewCGroupsForCurrentProcess
+ _newQueryer = newQueryer
+)
+
+func newQueryer() (queryer, error) {
+ cgroups, err := _newCgroups2()
+ if err == nil {
+ return cgroups, nil
+ }
+ if errors.Is(err, cg.ErrNotV2) {
+ return _newCgroups()
+ }
+ return nil, err
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
new file mode 100644
index 0000000..e747015
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !linux
+// +build !linux
+
+package runtime
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. This is Linux-specific and not supported on the
+// current OS.
+func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
+ return -1, CPUQuotaUndefined, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
new file mode 100644
index 0000000..f8a2834
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package runtime
+
+import "math"
+
+// CPUQuotaStatus presents the status of how CPU quota is used
+type CPUQuotaStatus int
+
+const (
+ // CPUQuotaUndefined is returned when CPU quota is undefined
+ CPUQuotaUndefined CPUQuotaStatus = iota
+ // CPUQuotaUsed is returned when a valid CPU quota can be used
+ CPUQuotaUsed
+ // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
+ CPUQuotaMinUsed
+)
+
+// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
+func DefaultRoundFunc(v float64) int {
+ return int(math.Floor(v))
+}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
new file mode 100644
index 0000000..e561fe6
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to
+// match the configured Linux CPU quota. Unlike the top-level automaxprocs
+// package, it lets the caller configure logging and handle errors.
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs"
+
+import (
+ "os"
+ "runtime"
+
+ iruntime "go.uber.org/automaxprocs/internal/runtime"
+)
+
+const _maxProcsKey = "GOMAXPROCS"
+
+func currentMaxProcs() int {
+ return runtime.GOMAXPROCS(0)
+}
+
+type config struct {
+ printf func(string, ...interface{})
+ procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error)
+ minGOMAXPROCS int
+ roundQuotaFunc func(v float64) int
+}
+
+func (c *config) log(fmt string, args ...interface{}) {
+ if c.printf != nil {
+ c.printf(fmt, args...)
+ }
+}
+
+// An Option alters the behavior of Set.
+type Option interface {
+ apply(*config)
+}
+
+// Logger uses the supplied printf implementation for log output. By default,
+// Set doesn't log anything.
+func Logger(printf func(string, ...interface{})) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.printf = printf
+ })
+}
+
+// Min sets the minimum GOMAXPROCS value that will be used.
+// Any value below 1 is ignored.
+func Min(n int) Option {
+ return optionFunc(func(cfg *config) {
+ if n >= 1 {
+ cfg.minGOMAXPROCS = n
+ }
+ })
+}
+
+// RoundQuotaFunc sets the function that will be used to convert the CPU quota from float to int.
+func RoundQuotaFunc(rf func(v float64) int) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.roundQuotaFunc = rf
+ })
+}
+
+type optionFunc func(*config)
+
+func (of optionFunc) apply(cfg *config) { of(cfg) }
+
+// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
+// any error encountered and an undo function.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set(opts ...Option) (func(), error) {
+ cfg := &config{
+ procs: iruntime.CPUQuotaToGOMAXPROCS,
+ roundQuotaFunc: iruntime.DefaultRoundFunc,
+ minGOMAXPROCS: 1,
+ }
+ for _, o := range opts {
+ o.apply(cfg)
+ }
+
+ undoNoop := func() {
+ cfg.log("maxprocs: No GOMAXPROCS change to reset")
+ }
+
+ // Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+ // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+ // Linux, and guarantee a minimum value of 1. The minimum guaranteed value
+ // can be overridden using `maxprocs.Min()`.
+ if max, exists := os.LookupEnv(_maxProcsKey); exists {
+ cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max)
+ return undoNoop, nil
+ }
+
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
+ if err != nil {
+ return undoNoop, err
+ }
+
+ if status == iruntime.CPUQuotaUndefined {
+ cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs())
+ return undoNoop, nil
+ }
+
+ prev := currentMaxProcs()
+ undo := func() {
+ cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev)
+ runtime.GOMAXPROCS(prev)
+ }
+
+ switch status {
+ case iruntime.CPUQuotaMinUsed:
+ cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs)
+ case iruntime.CPUQuotaUsed:
+ cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs)
+ }
+
+ runtime.GOMAXPROCS(maxProcs)
+ return undo, nil
+}
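
The vendored `maxprocs` package is the public entry point that applications call at startup. Below is a minimal, hedged usage sketch built only on the API defined in this file (`Set`, `Logger`); it is illustrative wiring, not this repository's actual startup code.

```go
package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Adjust GOMAXPROCS to the container CPU quota at startup; the returned
	// undo function restores the previous value (useful in tests).
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf))
	defer undo()
	if err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}

	// ... start the application ...
}
```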
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
new file mode 100644
index 0000000..cc7fc5a
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package maxprocs
+
+// Version is the current package version.
+const Version = "1.6.0"
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000..6d4d1be
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000..b9a05e3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+cover.html
+cover.out
+/bin
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000..f8177b9
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,95 @@
+Releases
+========
+
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements the multiple-error
+  interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+ loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+ and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+ errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+ allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000..413e30f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017-2021 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+	// _cgroupv2FSType is the Linux CGroup-V2 file system type used in
+	// `/proc/$PID/mountinfo`.
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000..dcb6fe7
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,38 @@
+# Directory to put `go install`ed binaries in.
+export GOBIN ?= $(shell pwd)/bin
+
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: golint
+golint:
+ @cd tools && go install golang.org/x/lint/golint
+ @$(GOBIN)/golint ./...
+
+.PHONY: staticcheck
+staticcheck:
+ @cd tools && go install honnef.co/go/tools/cmd/staticcheck
+ @$(GOBIN)/staticcheck ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
+ go tool cover -html=cover.out -o cover.html
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000..5ab6ac4
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,43 @@
+# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+`multierr` allows combining one or more Go `error`s together.
+
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
+## Installation
+
+```bash
+go get -u go.uber.org/multierr@latest
+```
+
+## Status
+
+Stable: No breaking changes will be made before 2.0.
+
+-------------------------------------------------------------------------------
+
+Released under the [MIT License].
+
+[MIT License]: LICENSE.txt
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
+[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
+[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000..3a828b2
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,646 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// # Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires introduction of a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
+//
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to the list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ // Prefix for multi-line messages
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, a nil slice is returned.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ return extractErrors(err)
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+ copyNeeded atomic.Bool
+ errors []error
+}
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+ if merr == nil {
+ return nil
+ }
+ return merr.errors
+}
+
+func (merr *multiError) Error() string {
+ if merr == nil {
+ return ""
+ }
+
+ buff := _bufferPool.Get().(*bytes.Buffer)
+ buff.Reset()
+
+ merr.writeSingleline(buff)
+
+ result := buff.String()
+ _bufferPool.Put(buff)
+ return result
+}
+
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+ if c == 'v' && f.Flag('+') {
+ merr.writeMultiline(f)
+ } else {
+ merr.writeSingleline(f)
+ }
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+ first := true
+ for _, item := range merr.errors {
+ if first {
+ first = false
+ } else {
+ w.Write(_singlelineSeparator)
+ }
+ io.WriteString(w, item.Error())
+ }
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+ w.Write(_multilinePrefix)
+ for _, item := range merr.errors {
+ w.Write(_multilineSeparator)
+ writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+ }
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+ first := true
+ for len(s) > 0 {
+ if first {
+ first = false
+ } else {
+ w.Write(prefix)
+ }
+
+ idx := strings.IndexByte(s, '\n')
+ if idx < 0 {
+ idx = len(s) - 1
+ }
+
+ io.WriteString(w, s[:idx+1])
+ s = s[idx+1:]
+ }
+}
+
+type inspectResult struct {
+ // Number of top-level non-nil errors
+ Count int
+
+ // Total number of errors including multiErrors
+ Capacity int
+
+ // Index of the first non-nil error in the list. Value is meaningless if
+ // Count is zero.
+ FirstErrorIdx int
+
+ // Whether the list contains at least one multiError
+ ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+ first := true
+ for i, err := range errors {
+ if err == nil {
+ continue
+ }
+
+ res.Count++
+ if first {
+ first = false
+ res.FirstErrorIdx = i
+ }
+
+ if merr, ok := err.(*multiError); ok {
+ res.Capacity += len(merr.errors)
+ res.ContainsMultiError = true
+ } else {
+ res.Capacity++
+ }
+ }
+ return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
+ res := inspect(errors)
+ switch res.Count {
+ case 0:
+ return nil
+ case 1:
+ // only one non-nil entry
+ return errors[res.FirstErrorIdx]
+ case len(errors):
+ if !res.ContainsMultiError {
+ // Error list is flat. Make a copy of it
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
+ }
+ }
+
+ nonNilErrs := make([]error, 0, res.Capacity)
+ for _, err := range errors[res.FirstErrorIdx:] {
+ if err == nil {
+ continue
+ }
+
+ if nested, ok := err.(*multiError); ok {
+ nonNilErrs = append(nonNilErrs, nested.errors...)
+ } else {
+ nonNilErrs = append(nonNilErrs, err)
+ }
+ }
+
+ return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// Combine(nil, nil) // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// Combine(err) // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+ return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
+func Append(left error, right error) error {
+ switch {
+ case left == nil:
+ return right
+ case right == nil:
+ return left
+ }
+
+ if _, ok := right.(*multiError); !ok {
+ if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+ // Common case where the error on the left is constantly being
+ // appended to.
+ errs := append(l.errors, right)
+ return &multiError{errors: errs}
+ } else if !ok {
+ // Both errors are single errors.
+ return &multiError{errors: []error{left, right}}
+ }
+ }
+
+ // Either right or both, left and right, are multiErrors. Rely on usual
+ // expensive logic.
+ errors := [2]error{left, right}
+ return fromSlice(errors[0:])
+}
+
+// AppendInto appends an error into the destination of an error pointer and
+// returns whether the error being appended was non-nil.
+//
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
+//
+// The above is equivalent to,
+//
+// err := multierr.Append(r.Close(), w.Close())
+//
+// As AppendInto reports whether the provided error was non-nil, it may be
+// used to build a multierr error in a loop more ergonomically. For example:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
+func AppendInto(into *error, err error) (errored bool) {
+ if into == nil {
+ // We panic if 'into' is nil. This is not documented above
+ // because suggesting that the pointer must be non-nil may
+ // confuse users into thinking that the error that it points
+ // to must be non-nil.
+ panic("misuse of multierr.AppendInto: into pointer must not be nil")
+ }
+
+ if err == nil {
+ return false
+ }
+ *into = Append(*into, err)
+ return true
+}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and
+//		// if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+//		// Similarly, this schedules scanner.Err to be called and
+//		// inspected when the function returns, and appends its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
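
Putting the documented patterns together, here is a hedged usage sketch of the public API defined in this file: the named-return `defer`/`AppendInvoke` pattern for cleanup errors, plus `Combine` and `Errors` for independent failures. The function and file names are placeholders.

```go
package main

import (
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// copyBoth demonstrates appending cleanup errors from deferred Close calls
// into the named return value.
func copyBoth(srcPath, dstPath string) (err error) {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	// Close errors are appended to err when the function returns.
	defer multierr.AppendInvoke(&err, multierr.Close(src))

	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer multierr.AppendInvoke(&err, multierr.Close(dst))

	// ... copy src to dst ...
	return nil
}

func main() {
	// Combine failures from independent operations and inspect them.
	err := multierr.Combine(
		copyBoth("a.txt", "a.bak"),
		copyBoth("b.txt", "b.bak"),
	)
	for _, e := range multierr.Errors(err) {
		fmt.Println("error:", e)
	}
}
```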
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 0000000..a173f9c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // check if the given err is an Unwrapable error that
+ // implements multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 0000000..93872a3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000..8e5ca7d
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 95% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+ignore:
+ - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000..da9d9d0
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
+
+/bin
+cover.out
+cover.html
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 0000000..2346df1
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+ # Make output more digestible with quickfix in vim/emacs/etc.
+ sort-results: true
+ print-issued-lines: false
+
+linters:
+ # We'll track the golangci-lint default linters manually
+ # instead of letting them change without our control.
+ disable-all: true
+ enable:
+ # golangci-lint defaults:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+ # Our own extras:
+ - gofumpt
+ - nolintlint # lints nolint directives
+ - revive
+
+linters-settings:
+ govet:
+ # These govet checks are disabled by default, but they're useful.
+ enable:
+      - nilness
+ - reflectvaluecompare
+ - sortslice
+ - unusedwrite
+
+ errcheck:
+ exclude-functions:
+ # These methods can not fail.
+ # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write
+ - (*go.uber.org/zap/buffer.Buffer).WriteByte
+ - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+ - (*go.uber.org/zap/zapio.Writer).Close
+ - (*go.uber.org/zap/zapio.Writer).Sync
+ - (*go.uber.org/zap/zapio.Writer).Write
+ # Write to zapio.Writer cannot fail,
+ # so io.WriteString on it cannot fail.
+ - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+ # Writing a plain string to a fmt.State cannot fail.
+ - io.WriteString(fmt.State)
+
+issues:
+ # Print all issues reported by all linters.
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+ # Don't ignore some of the issues that golangci-lint considers okay.
+ # This includes documenting all exported entities.
+ exclude-use-default: false
+
+ exclude-rules:
+ # Don't warn on unused parameters.
+ # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive]
+ text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+ # staticcheck already has smarter checks for empty blocks.
+ # revive's empty-block linter has false positives.
+ # For example, as of writing this, the following is not allowed.
+ # for foo() { }
+ - linters: [revive]
+ text: 'empty-block: this block is empty, you can remove it'
+
+ # Ignore logger.Sync() errcheck failures in example_test.go
+ # since those are intended to be uncomplicated examples.
+ - linters: [errcheck]
+ path: example_test.go
+ text: 'Error return value of `logger.Sync` is not checked'
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000..4fea302
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,117 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
+Blazing fast, structured, leveled logging in Go.
+
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions)
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE).
+
+ In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000..6d6cd5f
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,687 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+ messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+ new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+ methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+ `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+ `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+ vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+ size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+ with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+ without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+ for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+ allows disabling annotation of log entries with caller information if
+ previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+ `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+ adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+ development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+ be generated for arrays of `time.Time` objects when using string-based time
+ formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger.
+* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
+
+Thanks to @caibirdme for their contributions to this release.
+
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
+## 1.13.0 (13 Nov 2019)
+
+Enhancements:
+* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors
+ to log pointers to primitives with support for `nil` values.
+
+Thanks to @jbizzle for their contributions to this release.
+
+[#758]: https://github.com/uber-go/zap/pull/758
+
+## 1.12.0 (29 Oct 2019)
+
+Enhancements:
+* [#751][]: Migrate to Go modules.
+
+[#751]: https://github.com/uber-go/zap/pull/751
+
+## 1.11.0 (21 Oct 2019)
+
+Enhancements:
+* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`.
+* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders.
+
+Thanks to @juicemia, @uhthomas for their contributions to this release.
+
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
+## 1.10.0 (29 Apr 2019)
+
+Bugfixes:
+* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a
+ string.
+* [#706][]: Fix incorrect call depth to determine caller in Go 1.12.
+
+Enhancements:
+* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test
+ loggers.
+* [#675][]: Don't panic when encoding a String field.
+* [#704][]: Disable HTML escaping for JSON objects encoded using the
+ reflect-based encoder.
+
+Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
+to this release.
+
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
+
+Bugfixes:
+
+* [#614][]: MapObjectEncoder should not ignore empty slices.
+
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
+
+Enhancements:
+* [#602][]: Reduce number of allocations when logging with reflection.
+* [#572][], [#606][]: Expose a registry for third-party logging sinks.
+
+Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
+@dimroc for their contributions to this release.
+
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
+
+Enhancements:
+* [#508][]: Make log level configurable when redirecting the standard
+ library's logger.
+* [#518][]: Add a logger that writes to a `*testing.TB`.
+* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
+
+Bugfixes:
+* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
+
+Thanks to @DiSiqueira and @djui for their contributions to this release.
+
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
+
+Bugfixes:
+* [#504][]: Store strings when using AddByteString with the map encoder.
+
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
+
+Enhancements:
+
+* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
+ to specify the level of the logged messages.
+
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
+
+Enhancements:
+
+* [#491][]: Omit zap stack frames from stacktraces.
+* [#490][]: Add a `ContextMap` method to observer logs for simpler
+ field validation in tests.
+
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
+
+Enhancements:
+
+* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
+* [#465][]: Support user-supplied encoders for logger names.
+
+Bugfixes:
+
+* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
+
+Thanks to @richard-tunein and @pavius for their contributions to this release.
+
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
+
+This release fixes two bugs.
+
+Bugfixes:
+
+* [#435][]: Support a variety of case conventions when unmarshaling levels.
+* [#444][]: Fix a panic in the observer.
+
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
+
+This release adds a few small features and is fully backward-compatible.
+
+Enhancements:
+
+* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
+ override the Unix-style default.
+* [#425][]: Preserve time zones when logging times.
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
+ variety of operations a bit simpler.
+
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
+
+This release adds an enhancement to zap's testing helpers as well as the
+ability to marshal an AtomicLevel. It is fully backward-compatible.
+
+Enhancements:
+
+* [#415][]: Add a substring-filtering helper to zap's observer. This is
+ particularly useful when testing the `SugaredLogger`.
+* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
+
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
+
+This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
+
+Enhancements:
+
+* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
+ `grpclog.Logger`.
+
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
+
+This release fixes two bugs and adds some enhancements to zap's testing helpers.
+It is fully backward-compatible.
+
+Bugfixes:
+
+* [#385][]: Fix caller path trimming on Windows.
+* [#396][]: Fix a panic when attempting to use non-existent directories with
+ zap's configuration struct.
+
+Enhancements:
+
+* [#386][]: Add filtering helpers to zaptest's observing logger.
+
+Thanks to @moitias for contributing to this release.
+
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
+
+This is zap's first stable release. All exported APIs are now final, and no
+further breaking changes will be made in the 1.x release series. Anyone using a
+semver-aware dependency manager should now pin to `^1`.
+
+Breaking changes:
+
+* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
+ casting from `[]byte` to `string`.
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
+ `zap.Logger`, and `zap.SugaredLogger`.
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
+ clash with other testing helpers.
+
+Bugfixes:
+
+* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
+ for tab-separated console output.
+* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
+ work with concurrency-safe `WriteSyncer` implementations.
+* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
+ systems.
+* [#373][]: Report the correct caller from zap's standard library
+ interoperability wrappers.
+
+Enhancements:
+
+* [#348][]: Add a registry allowing third-party encodings to work with zap's
+ built-in `Config`.
+* [#327][]: Make the representation of logger callers configurable (like times,
+ levels, and durations).
+* [#376][]: Allow third-party encoders to use their own buffer pools, which
+ removes the last performance advantage that zap's encoders have over plugins.
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
+ `WriteSyncer`s and lock the result.
+* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
+ Go 1.9).
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
+ easier for particularly punctilious users to unit test their application's
+ logging.
+
+Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
+contributions to this release.
+
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
+
+This is the third release candidate for zap's stable release. There are no
+breaking changes.
+
+Bugfixes:
+
+* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
+ rather than `[]uint8`.
+
+Enhancements:
+
+* [#307][]: Users can opt into colored output for log levels.
+* [#353][]: In addition to hijacking the output of the standard library's
+ package-global logging functions, users can now construct a zap-backed
+ `log.Logger` instance.
+* [#311][]: Frames from common runtime functions and some of zap's internal
+ machinery are now omitted from stacktraces.
+
+Thanks to @ansel1 and @suyash for their contributions to this release.
+
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
+
+This is the second release candidate for zap's stable release. It includes two
+breaking changes.
+
+Breaking changes:
+
+* [#316][]: Zap's global loggers are now fully concurrency-safe
+ (previously, users had to ensure that `ReplaceGlobals` was called before the
+ loggers were in use). However, they must now be accessed via the `L()` and
+ `S()` functions. Users can update their projects with
+
+ ```
+ gofmt -r "zap.L -> zap.L()" -w .
+ gofmt -r "zap.S -> zap.S()" -w .
+ ```
+* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
+ JSON and YAML struct tags on all config structs. This release fixes the tags
+ and adds static analysis to prevent similar bugs in the future.
+
+Bugfixes:
+
+* [#321][]: Redirecting the standard library's `log` output now
+ correctly reports the logger's caller.
+
+Enhancements:
+
+* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
+ errors like those produced by `github.com/pkg/errors`.
+* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
+ now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
+ zap.NewNop()' -w .`.
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
+ more informative error.
+
+Thanks to @skipor and @chapsuk for their contributions to this release.
+
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
+
+This is the first release candidate for zap's stable release. There are multiple
+breaking changes and improvements from the pre-release version. Most notably:
+
+* **Zap's import path is now "go.uber.org/zap"** — all users will
+ need to update their code.
+* User-facing types and functions remain in the `zap` package. Code relevant
+ largely to extension authors is now in the `zapcore` package.
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's
+ internals but provide a different user-facing API.
+* `Logger` is now a concrete type instead of an interface.
+* A less verbose (though slower) logging API is included by default.
+* Package-global loggers `L` and `S` are included.
+* A human-friendly console encoder is included.
+* A declarative config struct allows common logger configurations to be managed
+ as configuration instead of code.
+* Sampling is more accurate, and doesn't depend on the standard library's shared
+ timer heap.
+
+## 0.1.0-beta.1 (6 Feb 2017)
+
+This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
+upgrade at their leisure. Since this is the first tagged release, there are no
+backward compatibility concerns and all functionality is new.
+
+Early zap adopters should pin to the 0.1.x minor version until they're ready to
+upgrade to the upcoming stable release.
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..e327d9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-conduct@uber.com. The project
+team will review and investigate all complaints, and will respond in a way
+that it deems appropriate to the circumstances. The project team is obligated
+to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
new file mode 100644
index 0000000..ea02f3c
--- /dev/null
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -0,0 +1,70 @@
+# Contributing
+
+We'd love your help making zap the very best structured logging library in Go!
+
+If you'd like to add new exported APIs, please [open an issue][open-issue]
+describing your proposal — discussing API changes ahead of time makes
+pull request review much smoother. In your issue, pull request, and any other
+communications, please remember to treat your fellow contributors with
+respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
+
+Note that you'll need to sign [Uber's Contributor License Agreement][cla]
+before we can accept any of your contributions. If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+
+## Setup
+
+[Fork][fork], then clone the repository:
+
+```bash
+mkdir -p $GOPATH/src/go.uber.org
+cd $GOPATH/src/go.uber.org
+git clone git@github.com:your_github_username/zap.git
+cd zap
+git remote add upstream https://github.com/uber-go/zap.git
+git fetch upstream
+```
+
+Make sure that the tests and the linters pass:
+
+```bash
+make test
+make lint
+```
+
+## Making Changes
+
+Start by creating a new branch for your changes:
+
+```bash
+cd $GOPATH/src/go.uber.org/zap
+git checkout master
+git fetch upstream
+git rebase upstream/master
+git checkout -b cool_new_feature
+```
+
+Make your changes, then ensure that `make lint` and `make test` still pass. If
+you're satisfied with your changes, push them to your fork.
+
+```bash
+git push origin cool_new_feature
+```
+
+Then use the GitHub UI to open a pull request.
+
+At this point, you're waiting on us to review your changes. We _try_ to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000..b183b20
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
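+For example, a caller that only needs error logging might depend on a small,
+locally defined interface. The sketch below is illustrative (the `errorLogger`
+and `processOrder` names are not part of zap's API); `*zap.Logger` satisfies it
+because it declares `Error(msg string, fields ...Field)`.
+
+```go
+type errorLogger interface {
+	Error(msg string, fields ...zap.Field)
+}
+
+func processOrder(log errorLogger, id string) {
+	log.Error("order processing failed", zap.String("order_id", id))
+}
+```
+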
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+causes repeated logs within a second to be sampled. See more details on why sampling
+is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
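+If you need every entry, sampling can be disabled before building the logger.
+A minimal sketch using `zap.NewProductionConfig` (setting `Sampling` to `nil`
+turns sampling off):
+
+```go
+cfg := zap.NewProductionConfig()
+cfg.Sampling = nil // disable sampling so no entries are dropped
+logger, err := cfg.Build()
+if err != nil {
+	panic(err)
+}
+defer logger.Sync()
+```
+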
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+
+If you've ever written code like this, you need `DPanic`:
+
+```go
+if err != nil {
+ panic(fmt.Sprintf("shouldn't ever get here: %v", err))
+}
+```
+
+## Installation
+
+### What does the error `expects import "go.uber.org/zap"` mean?
+
+Either zap was installed incorrectly or you're referencing the wrong package
+name in your code.
+
+Zap's source code happens to be hosted on GitHub, but the [import
+path][import-path] is `go.uber.org/zap`. This gives us, the project
+maintainers, the freedom to move the source code if necessary. However, it
+means that you need to take a little care when installing and using the
+package.
+
+If you follow two simple rules, everything should work: install zap with `go
+get -u go.uber.org/zap`, and always import it in your code with `import
+"go.uber.org/zap"`. Your code shouldn't contain *any* references to
+`github.com/uber-go/zap`.
+
+## Usage
+
+### Does zap support log rotation?
+
+Zap doesn't natively support rotating log files, since we prefer to leave this
+to an external program like `logrotate`.
+
+However, it's easy to integrate a log rotation package like
+[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
+
+```go
+// lumberjack.Logger is already safe for concurrent use, so we don't need to
+// lock it.
+w := zapcore.AddSync(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+})
+core := zapcore.NewCore(
+ zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ w,
+ zap.InfoLevel,
+)
+logger := zap.New(core)
+```
+
+## Extensions
+
+We'd love to support every logging need within zap itself, but we're only
+familiar with a handful of log ingestion systems, flag-parsing packages, and
+the like. Rather than merging code that we can't effectively debug and
+support, we'd rather grow an ecosystem of zap extensions.
+
+We're aware of the following extensions, but haven't used them ourselves:
+
+| Package | Integration |
+| --- | --- |
+| `github.com/tchap/zapext` | Sentry, syslog |
+| `github.com/fgrosse/zaptest` | Ginkgo |
+| `github.com/blendle/zapdriver` | Stackdriver |
+| `github.com/moul/zapgorm` | Gorm |
+| `github.com/moul/zapfilter` | Advanced filtering rules |
+
+[go-proverbs]: https://go-proverbs.github.io/
+[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
+[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE b/vendor/go.uber.org/zap/LICENSE
new file mode 100644
index 0000000..6652bed
--- /dev/null
+++ b/vendor/go.uber.org/zap/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
new file mode 100644
index 0000000..eb1cee5
--- /dev/null
+++ b/vendor/go.uber.org/zap/Makefile
@@ -0,0 +1,76 @@
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+# Directories containing independent Go modules.
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
+
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
+
+.PHONY: all
+all: lint test
+
+.PHONY: lint
+lint: golangci-lint tidy-lint license-lint
+
+.PHONY: golangci-lint
+golangci-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] golangci-lint: $(mod)" && \
+ golangci-lint run --path-prefix $(mod)) &&) true
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS), \
+ (cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] tidy: $(mod)" && \
+ go mod tidy && \
+ git diff --exit-code -- go.mod go.sum) &&) true
+
+
+.PHONY: license-lint
+license-lint:
+ ./checklicense.sh
+
+$(GOVULNCHECK):
+ cd tools && go install golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ @$(foreach dir,$(COVER_DIRS), ( \
+ cd $(dir) && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+ && go tool cover -html=cover.out -o cover.html) &&) true
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach dir,$(MODULE_DIRS), ( \
+ cd $(dir) && \
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
+ ) &&) true
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
+
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+ $(GOVULNCHECK) ./...
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
new file mode 100644
index 0000000..a17035c
--- /dev/null
+++ b/vendor/go.uber.org/zap/README.md
@@ -0,0 +1,149 @@
+# :zap: zap
+
+
+
+
+Blazing fast, structured, leveled logging in Go.
+
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions)
+
+Log a message and 10 fields:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE).
+
+ In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
new file mode 100644
index 0000000..abfccb5
--- /dev/null
+++ b/vendor/go.uber.org/zap/array.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
+func Array(key string, val zapcore.ArrayMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) Field {
+ return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) Field {
+ return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) Field {
+ return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) Field {
+ return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) Field {
+ return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) Field {
+ return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) Field {
+ return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) Field {
+ return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) Field {
+ return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) Field {
+ return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) Field {
+ return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) Field {
+ return Array(key, int8s(nums))
+}
+
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+ return Array(key, stringArray(ss))
+}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+ return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+ return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+ return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+ return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+ return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+ return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+ return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field {
+ return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bs {
+ arr.AppendBool(bs[i])
+ }
+ return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bss {
+ arr.AppendByteString(bss[i])
+ }
+ return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex128(nums[i])
+ }
+ return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex64(nums[i])
+ }
+ return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ds {
+ arr.AppendDuration(ds[i])
+ }
+ return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat64(nums[i])
+ }
+ return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat32(nums[i])
+ }
+ return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt(nums[i])
+ }
+ return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt64(nums[i])
+ }
+ return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt32(nums[i])
+ }
+ return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt16(nums[i])
+ }
+ return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt8(nums[i])
+ }
+ return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ss {
+ arr.AppendString(ss[i])
+ }
+ return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ts {
+ arr.AppendTime(ts[i])
+ }
+ return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint(nums[i])
+ }
+ return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint64(nums[i])
+ }
+ return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint32(nums[i])
+ }
+ return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint16(nums[i])
+ }
+ return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint8(nums[i])
+ }
+ return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUintptr(nums[i])
+ }
+ return nil
+}
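
To make the constructors above concrete, here is a minimal, self-contained sketch of how `Objects`, `ObjectValues`, and the slice helpers are typically combined from application code. The `author` and `request` types are invented for illustration and are not part of the vendored package:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// author implements zapcore.ObjectMarshaler on its value receiver,
// so a []author can be passed to zap.Objects.
type author struct{ name string }

func (a author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.name)
	return nil
}

// request implements zapcore.ObjectMarshaler on its pointer receiver,
// so a []request goes through zap.ObjectValues ([]*request would use zap.Objects).
type request struct{ url string }

func (r *request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.url)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("batch processed",
		zap.Strings("tags", []string{"go", "logging"}),
		zap.Ints("sizes", []int{1, 2, 3}),
		zap.Objects("authors", []author{{name: "alice"}, {name: "bob"}}),
		zap.ObjectValues("requests", []request{{url: "http://example.com"}}),
	)
}
```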
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000..0b8540c
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+ "strconv"
+ "time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+ bs []byte
+ pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+ b.bs = append(b.bs, v)
+}
+
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+ b.bs = append(b.bs, v...)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+ b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+ b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+ b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+ b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+ b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+ b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+ return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+ return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+ return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+ return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+ b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+ b.bs = append(b.bs, bs...)
+ return len(bs), nil
+}
+
+// WriteByte writes a single byte to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteByte(v byte) error {
+ b.AppendByte(v)
+ return nil
+}
+
+// WriteString writes a string to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteString(s string) (int, error) {
+ b.AppendString(s)
+ return len(s), nil
+}
+
+// TrimNewline trims any final "\n" byte from the end of the buffer.
+func (b *Buffer) TrimNewline() {
+ if i := len(b.bs) - 1; i >= 0 {
+ if b.bs[i] == '\n' {
+ b.bs = b.bs[:i]
+ }
+ }
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+ b.pool.put(b)
+}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
new file mode 100644
index 0000000..8463233
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import (
+ "go.uber.org/zap/internal/pool"
+)
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+ p *pool.Pool[*Buffer]
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+ return Pool{
+ p: pool.New(func() *Buffer {
+ return &Buffer{
+ bs: make([]byte, 0, _size),
+ }
+ }),
+ }
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+ buf := p.p.Get()
+ buf.Reset()
+ buf.pool = p
+ return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+ p.p.Put(buf)
+}
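
As a quick, illustrative sketch (not part of the vendored code) of the Buffer and Pool APIs above: buffers are only obtainable through a Pool, appended to with the zero-allocation helpers, and returned with Free when done.

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	// Buffers are only constructed via a Pool.
	pool := buffer.NewPool()

	buf := pool.Get()
	defer buf.Free() // return the buffer to the pool when done

	buf.AppendString("status=")
	buf.AppendInt(200)
	buf.AppendByte(' ')
	buf.AppendBool(true)

	// Buffer also implements io.Writer, so the fmt helpers work too.
	fmt.Fprintf(buf, " dur=%v", "1.2s")

	fmt.Println(buf.String()) // status=200 true dur=1.2s
}
```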
diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh
new file mode 100644
index 0000000..345ac8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/checklicense.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
new file mode 100644
index 0000000..e76e4e6
--- /dev/null
+++ b/vendor/go.uber.org/zap/config.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "sort"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// If specified, the Sampler will invoke the Hook after each decision.
+//
+// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
+// details.
+type SamplingConfig struct {
+ Initial int `json:"initial" yaml:"initial"`
+ Thereafter int `json:"thereafter" yaml:"thereafter"`
+ Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+ // Level is the minimum enabled logging level. Note that this is a dynamic
+ // level, so calling Config.Level.SetLevel will atomically change the log
+ // level of all loggers descended from this config.
+ Level AtomicLevel `json:"level" yaml:"level"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development bool `json:"development" yaml:"development"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+ // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+ // EncoderConfig sets options for the chosen encoder. See
+ // zapcore.EncoderConfig for details.
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+ // OutputPaths is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+ // ErrorOutputPaths is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+ // InitialFields is a collection of fields to add to the root logger.
+ InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+//
+// Messages encoded with this configuration will be JSON-formatted
+// and will have the following keys by default:
+//
+// - "level": The logging level (e.g. "info", "error").
+// - "ts": The current time in number of seconds since the Unix epoch.
+// - "msg": The message passed to the log statement.
+// - "caller": If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - "stacktrace": If available, a stack trace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted as floating-point number of seconds since the Unix
+// epoch.
+// - Duration is formatted as floating-point number of seconds.
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewProductionEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewProductionConfig builds a reasonable default production logging
+// configuration.
+// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of ErrorLevel and above.
+// DPanicLevel logs will not panic, but will write a stacktrace.
+//
+// Sampling is enabled at 100:100 by default,
+// meaning that after the first 100 log entries
+// with the same level and message in the same second,
+// it will log every 100th entry
+// with the same level and message in the same second.
+// You may disable this behavior by setting Sampling to nil.
+//
+// See [NewProductionEncoderConfig] for information
+// on the default encoder configuration.
+func NewProductionConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(InfoLevel),
+ Development: false,
+ Sampling: &SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: NewProductionEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+//
+// Messages encoded with this configuration will use Zap's console encoder
+// intended to print human-readable output.
+// It will print log messages with the following information:
+//
+// - The log level (e.g. "INFO", "ERROR").
+// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - The message passed to the log statement.
+// - If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - If available, a stacktrace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - Duration is formatted as a string (e.g. "1.234s").
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewDevelopmentEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ // Keys can be anything except the empty string.
+ TimeKey: "T",
+ LevelKey: "L",
+ NameKey: "N",
+ CallerKey: "C",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "M",
+ StacktraceKey: "S",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.CapitalLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewDevelopmentConfig builds a reasonable default development logging
+// configuration.
+// Logging is enabled at DebugLevel and above, and uses a console encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of WarnLevel and above.
+// DPanicLevel logs will panic.
+//
+// See [NewDevelopmentEncoderConfig] for information
+// on the default encoder configuration.
+func NewDevelopmentConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(DebugLevel),
+ Development: true,
+ Encoding: "console",
+ EncoderConfig: NewDevelopmentEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// Build constructs a logger from the Config and Options.
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+ enc, err := cfg.buildEncoder()
+ if err != nil {
+ return nil, err
+ }
+
+ sink, errSink, err := cfg.openSinks()
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Level == (AtomicLevel{}) {
+ return nil, errors.New("missing Level")
+ }
+
+ log := New(
+ zapcore.NewCore(enc, sink, cfg.Level),
+ cfg.buildOptions(errSink)...,
+ )
+ if len(opts) > 0 {
+ log = log.WithOptions(opts...)
+ }
+ return log, nil
+}
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+ opts := []Option{ErrorOutput(errSink)}
+
+ if cfg.Development {
+ opts = append(opts, Development())
+ }
+
+ if !cfg.DisableCaller {
+ opts = append(opts, AddCaller())
+ }
+
+ stackLevel := ErrorLevel
+ if cfg.Development {
+ stackLevel = WarnLevel
+ }
+ if !cfg.DisableStacktrace {
+ opts = append(opts, AddStacktrace(stackLevel))
+ }
+
+ if scfg := cfg.Sampling; scfg != nil {
+ opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+ var samplerOpts []zapcore.SamplerOption
+ if scfg.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
+ }
+ return zapcore.NewSamplerWithOptions(
+ core,
+ time.Second,
+ cfg.Sampling.Initial,
+ cfg.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ }))
+ }
+
+ if len(cfg.InitialFields) > 0 {
+ fs := make([]Field, 0, len(cfg.InitialFields))
+ keys := make([]string, 0, len(cfg.InitialFields))
+ for k := range cfg.InitialFields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fs = append(fs, Any(k, cfg.InitialFields[k]))
+ }
+ opts = append(opts, Fields(fs...))
+ }
+
+ return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+ sink, closeOut, err := Open(cfg.OutputPaths...)
+ if err != nil {
+ return nil, nil, err
+ }
+ errSink, _, err := Open(cfg.ErrorOutputPaths...)
+ if err != nil {
+ closeOut()
+ return nil, nil, err
+ }
+ return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+ return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
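
A minimal sketch of driving this Config type from application code, assuming the production preset as a starting point; the overridden values below are arbitrary examples, not recommendations.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Start from the production defaults and override a few knobs.
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.InitialFields = map[string]interface{}{"service": "example"}
	cfg.Sampling = nil // a nil SamplingConfig disables sampling

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Debug("logger built from Config")
}
```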
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
new file mode 100644
index 0000000..3c50d7b
--- /dev/null
+++ b/vendor/go.uber.org/zap/doc.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// # Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// # Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// # Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
new file mode 100644
index 0000000..caa04ce
--- /dev/null
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+ _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+ "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewConsoleEncoder(encoderConfig), nil
+ },
+ "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewJSONEncoder(encoderConfig), nil
+ },
+ }
+ _encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+ _encoderMutex.Lock()
+ defer _encoderMutex.Unlock()
+ if name == "" {
+ return errNoEncoderNameSpecified
+ }
+ if _, ok := _encoderNameToConstructor[name]; ok {
+ return fmt.Errorf("encoder already registered for name %q", name)
+ }
+ _encoderNameToConstructor[name] = constructor
+ return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
+ }
+
+ _encoderMutex.RLock()
+ defer _encoderMutex.RUnlock()
+ if name == "" {
+ return nil, errNoEncoderNameSpecified
+ }
+ constructor, ok := _encoderNameToConstructor[name]
+ if !ok {
+ return nil, fmt.Errorf("no encoder registered for name %q", name)
+ }
+ return constructor(encoderConfig)
+}
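
For illustration, a custom encoding name can be registered once and then referenced from Config.Encoding; the name `json-rfc3339` below is invented for this sketch and simply reuses the built-in JSON encoder with a different time format.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Register a custom encoding name during program initialization.
	// Each name can be registered only once.
	if err := zap.RegisterEncoder("json-rfc3339", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
		cfg.EncodeTime = zapcore.RFC3339TimeEncoder
		return zapcore.NewJSONEncoder(cfg), nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.Encoding = "json-rfc3339" // resolved through the registry by Config.Build

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("using a registered encoder")
}
```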
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
new file mode 100644
index 0000000..45f7b83
--- /dev/null
+++ b/vendor/go.uber.org/zap/error.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "go.uber.org/zap/internal/pool"
+ "go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = pool.New(func() *errArrayElem {
+ return &errArrayElem{}
+})
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) Field {
+ return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
+func NamedError(key string, err error) Field {
+ if err == nil {
+ return Skip()
+ }
+ return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+ // To represent each error as an object with an "error" attribute and
+ // potentially an "errorVerbose" attribute, we need to wrap it in a
+ // type that implements LogObjectMarshaler. To prevent this from
+ // allocating, pool the wrapper type.
+ elem := _errArrayElemPool.Get()
+ elem.error = errs[i]
+ err := arr.AppendObject(elem)
+ elem.error = nil
+ _errArrayElemPool.Put(elem)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type errArrayElem struct {
+ error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ // Re-use the error field's logic, which supports non-standard error types.
+ Error(e.error).AddTo(enc)
+ return nil
+}
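
A short, illustrative sketch of the error field helpers (Error, NamedError, and the Errors constructor from array.go); the error values are made up for the example.

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	err := errors.New("connection refused")

	logger.Warn("fetch failed",
		zap.Error(err),                   // stored under the key "error"
		zap.NamedError("lastError", err), // same behavior, custom key
		// Errors wraps each element as an object; nil entries are skipped.
		zap.Errors("attempts", []error{err, nil, errors.New("timeout")}),
	)
}
```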
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
new file mode 100644
index 0000000..6743930
--- /dev/null
+++ b/vendor/go.uber.org/zap/field.go
@@ -0,0 +1,615 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "go.uber.org/zap/internal/stacktrace"
+ "go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+var (
+ _minTimeInt64 = time.Unix(0, math.MinInt64)
+ _maxTimeInt64 = time.Unix(0, math.MaxInt64)
+)
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+ return Field{Type: zapcore.SkipType}
+}
+
+// nilField returns a field which will marshal explicitly as nil. See motivation
+// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
+// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
+// implementation here should be changed to reflect that.
+func nilField(key string) Field { return Reflect(key, nil) }
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+ var ival int64
+ if val {
+ ival = 1
+ }
+ return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// Boolp constructs a field that carries a *bool. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Boolp(key string, val *bool) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Bool(key, *val)
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+ return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex128p constructs a field that carries a *complex128. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex128p(key string, val *complex128) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex128(key, *val)
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+ return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Complex64p constructs a field that carries a *complex64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex64p(key string, val *complex64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex64(key, *val)
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+ return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float64p constructs a field that carries a *float64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float64p(key string, val *float64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float64(key, *val)
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field {
+ return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Float32p constructs a field that carries a *float32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float32p(key string, val *float32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float32(key, *val)
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) Field {
+ return Int64(key, int64(val))
+}
+
+// Intp constructs a field that carries a *int. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Intp(key string, val *int) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int(key, *val)
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) Field {
+ return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int64p constructs a field that carries a *int64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int64p(key string, val *int64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int64(key, *val)
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) Field {
+ return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int32p constructs a field that carries a *int32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int32p(key string, val *int32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int32(key, *val)
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) Field {
+ return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int16p constructs a field that carries a *int16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int16p(key string, val *int16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int16(key, *val)
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) Field {
+ return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// Int8p constructs a field that carries a *int8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int8p(key string, val *int8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int8(key, *val)
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) Field {
+ return Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Stringp constructs a field that carries a *string. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Stringp(key string, val *string) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return String(key, *val)
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) Field {
+ return Uint64(key, uint64(val))
+}
+
+// Uintp constructs a field that carries a *uint. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintp(key string, val *uint) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint(key, *val)
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) Field {
+ return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint64p constructs a field that carries a *uint64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint64p(key string, val *uint64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint64(key, *val)
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) Field {
+ return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint32p constructs a field that carries a *uint32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint32p(key string, val *uint32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint32(key, *val)
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) Field {
+ return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint16p constructs a field that carries a *uint16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint16p(key string, val *uint16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint16(key, *val)
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) Field {
+ return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uint8p constructs a field that carries a *uint8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint8p(key string, val *uint8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint8(key, *val)
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) Field {
+ return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintptrp(key string, val *uintptr) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uintptr(key, *val)
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) Field {
+ return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
+func Namespace(key string) Field {
+ return Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) Field {
+ return Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) Field {
+ if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
+ return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
+ }
+ return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Timep constructs a field that carries a *time.Time. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Timep(key string, val *time.Time) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Time(key, *val)
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) Field {
+ return StackSkip(key, 1) // skip Stack
+}
+
+// StackSkip constructs a field similarly to Stack, but also skips the given
+// number of frames from the top of the stacktrace.
+func StackSkip(key string, skip int) Field {
+ // Returning the stacktrace as a string costs an allocation, but saves us
+ // from expanding the zapcore.Field union struct to include a byte slice. Since
+ // taking a stacktrace is already so expensive (~10us), the extra allocation
+ // is okay.
+ return String(key, stacktrace.Take(skip+1)) // skip StackSkip
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) Field {
+ return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Durationp(key string, val *time.Duration) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Duration(key, *val)
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Inline constructs a Field that is similar to Object, but it
+// will add the elements of the provided ObjectMarshaler to the
+// current namespace.
+func Inline(val zapcore.ObjectMarshaler) Field {
+ return zapcore.Field{
+ Type: zapcore.InlineMarshalerType,
+ Interface: val,
+ }
+}
+
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+ return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+ return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ for _, f := range d {
+ f.AddTo(enc)
+ }
+ return nil
+}
+
+// We discovered an issue where zap.Any can cause a performance degradation
+// when used in new goroutines.
+//
+// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
+// switch statement) of stack space for zap.Any when it takes the form:
+//
+// switch v := v.(type) {
+// case string:
+// return String(key, v)
+// case int:
+// return Int(key, v)
+// // ...
+// default:
+// return Reflect(key, v)
+// }
+//
+// To avoid this, we use the type switch to assign a value to a single local variable
+// and then call a function on it.
+// The local variable is just a function reference so it doesn't allocate
+// when converted to an interface{}.
+//
+// A fair bit of experimentation went into this.
+// See also:
+//
+// - https://github.com/uber-go/zap/pull/1301
+// - https://github.com/uber-go/zap/pull/1303
+// - https://github.com/uber-go/zap/pull/1304
+// - https://github.com/uber-go/zap/pull/1305
+// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
+type anyFieldC[T any] func(string, T) Field
+
+func (f anyFieldC[T]) Any(key string, val any) Field {
+ v, _ := val.(T)
+ // val is guaranteed to be a T, except when it's nil.
+ return f(key, v)
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprises, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
+func Any(key string, value interface{}) Field {
+ var c interface{ Any(string, any) Field }
+
+ switch value.(type) {
+ case zapcore.ObjectMarshaler:
+ c = anyFieldC[zapcore.ObjectMarshaler](Object)
+ case zapcore.ArrayMarshaler:
+ c = anyFieldC[zapcore.ArrayMarshaler](Array)
+ case []Field:
+ c = anyFieldC[[]Field](dictField)
+ case bool:
+ c = anyFieldC[bool](Bool)
+ case *bool:
+ c = anyFieldC[*bool](Boolp)
+ case []bool:
+ c = anyFieldC[[]bool](Bools)
+ case complex128:
+ c = anyFieldC[complex128](Complex128)
+ case *complex128:
+ c = anyFieldC[*complex128](Complex128p)
+ case []complex128:
+ c = anyFieldC[[]complex128](Complex128s)
+ case complex64:
+ c = anyFieldC[complex64](Complex64)
+ case *complex64:
+ c = anyFieldC[*complex64](Complex64p)
+ case []complex64:
+ c = anyFieldC[[]complex64](Complex64s)
+ case float64:
+ c = anyFieldC[float64](Float64)
+ case *float64:
+ c = anyFieldC[*float64](Float64p)
+ case []float64:
+ c = anyFieldC[[]float64](Float64s)
+ case float32:
+ c = anyFieldC[float32](Float32)
+ case *float32:
+ c = anyFieldC[*float32](Float32p)
+ case []float32:
+ c = anyFieldC[[]float32](Float32s)
+ case int:
+ c = anyFieldC[int](Int)
+ case *int:
+ c = anyFieldC[*int](Intp)
+ case []int:
+ c = anyFieldC[[]int](Ints)
+ case int64:
+ c = anyFieldC[int64](Int64)
+ case *int64:
+ c = anyFieldC[*int64](Int64p)
+ case []int64:
+ c = anyFieldC[[]int64](Int64s)
+ case int32:
+ c = anyFieldC[int32](Int32)
+ case *int32:
+ c = anyFieldC[*int32](Int32p)
+ case []int32:
+ c = anyFieldC[[]int32](Int32s)
+ case int16:
+ c = anyFieldC[int16](Int16)
+ case *int16:
+ c = anyFieldC[*int16](Int16p)
+ case []int16:
+ c = anyFieldC[[]int16](Int16s)
+ case int8:
+ c = anyFieldC[int8](Int8)
+ case *int8:
+ c = anyFieldC[*int8](Int8p)
+ case []int8:
+ c = anyFieldC[[]int8](Int8s)
+ case string:
+ c = anyFieldC[string](String)
+ case *string:
+ c = anyFieldC[*string](Stringp)
+ case []string:
+ c = anyFieldC[[]string](Strings)
+ case uint:
+ c = anyFieldC[uint](Uint)
+ case *uint:
+ c = anyFieldC[*uint](Uintp)
+ case []uint:
+ c = anyFieldC[[]uint](Uints)
+ case uint64:
+ c = anyFieldC[uint64](Uint64)
+ case *uint64:
+ c = anyFieldC[*uint64](Uint64p)
+ case []uint64:
+ c = anyFieldC[[]uint64](Uint64s)
+ case uint32:
+ c = anyFieldC[uint32](Uint32)
+ case *uint32:
+ c = anyFieldC[*uint32](Uint32p)
+ case []uint32:
+ c = anyFieldC[[]uint32](Uint32s)
+ case uint16:
+ c = anyFieldC[uint16](Uint16)
+ case *uint16:
+ c = anyFieldC[*uint16](Uint16p)
+ case []uint16:
+ c = anyFieldC[[]uint16](Uint16s)
+ case uint8:
+ c = anyFieldC[uint8](Uint8)
+ case *uint8:
+ c = anyFieldC[*uint8](Uint8p)
+ case []byte:
+ c = anyFieldC[[]byte](Binary)
+ case uintptr:
+ c = anyFieldC[uintptr](Uintptr)
+ case *uintptr:
+ c = anyFieldC[*uintptr](Uintptrp)
+ case []uintptr:
+ c = anyFieldC[[]uintptr](Uintptrs)
+ case time.Time:
+ c = anyFieldC[time.Time](Time)
+ case *time.Time:
+ c = anyFieldC[*time.Time](Timep)
+ case []time.Time:
+ c = anyFieldC[[]time.Time](Times)
+ case time.Duration:
+ c = anyFieldC[time.Duration](Duration)
+ case *time.Duration:
+ c = anyFieldC[*time.Duration](Durationp)
+ case []time.Duration:
+ c = anyFieldC[[]time.Duration](Durations)
+ case error:
+ c = anyFieldC[error](NamedError)
+ case []error:
+ c = anyFieldC[[]error](Errors)
+ case fmt.Stringer:
+ c = anyFieldC[fmt.Stringer](Stringer)
+ default:
+ c = anyFieldC[any](Reflect)
+ }
+
+ return c.Any(key, value)
+}
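
The constructors above are the strongly-typed building blocks behind zap's field API. A minimal usage sketch of application code follows (illustrative only, not part of the vendored file; the logger setup and field values are assumed):

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	logger.Info("request handled",
		zap.String("method", "GET"),                  // fixed-type constructor, no reflection
		zap.Duration("elapsed", 42*time.Millisecond), // encoder decides how durations render
		zap.Dict("user", // nested fields under one key, per Dict above
			zap.String("id", "u-123"),
			zap.Bool("admin", false),
		),
		zap.Any("payload", map[string]int{"items": 3}), // picks the best constructor, falls back to Reflect
	)
}
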
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
new file mode 100644
index 0000000..1312875
--- /dev/null
+++ b/vendor/go.uber.org/zap/flag.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "flag"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+ lvl := defaultLevel
+ flag.Var(&lvl, name, usage)
+ return &lvl
+}
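
A sketch of how LevelFlag is typically wired into a program's flag parsing; the flag name and the production config are assumptions, not something this repository defines:

package main

import (
	"flag"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// logLevel is parsed from --log-level; defaults to info (illustrative flag name).
var logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")

func main() {
	flag.Parse()

	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(*logLevel)
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Info("starting", zap.Stringer("level", logLevel))
}
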
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
new file mode 100644
index 0000000..8e1d05e
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.yaml
@@ -0,0 +1,34 @@
+package: go.uber.org/zap
+license: MIT
+import:
+- package: go.uber.org/atomic
+ version: ^1
+- package: go.uber.org/multierr
+ version: ^1
+testImport:
+- package: github.com/satori/go.uuid
+- package: github.com/sirupsen/logrus
+- package: github.com/apex/log
+ subpackages:
+ - handlers/json
+- package: github.com/go-kit/kit
+ subpackages:
+ - log
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+- package: gopkg.in/inconshreveable/log15.v2
+- package: github.com/mattn/goveralls
+- package: github.com/pborman/uuid
+- package: github.com/pkg/errors
+- package: github.com/rs/zerolog
+- package: golang.org/x/tools
+ subpackages:
+ - cover
+- package: golang.org/x/lint
+ subpackages:
+ - golint
+- package: github.com/axw/gocov
+ subpackages:
+ - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 0000000..3cb46c9
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ _stdLogDefaultDepth = 1
+ _loggerWriterDepth = 2
+ _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+ "https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+ _globalMu sync.RWMutex
+ _globalL = NewNop()
+ _globalS = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+ _globalMu.RLock()
+ l := _globalL
+ _globalMu.RUnlock()
+ return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+ _globalMu.RLock()
+ s := _globalS
+ _globalMu.RUnlock()
+ return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+ _globalMu.Lock()
+ prev := _globalL
+ _globalL = logger
+ _globalS = logger.Sugar()
+ _globalMu.Unlock()
+ return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ f := logger.Info
+ return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap logger
+// at the required level.
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLog(l *Logger) func() {
+ f, err := redirectStdLogAt(l, InfoLevel)
+ if err != nil {
+ // Can't get here, since passing InfoLevel to redirectStdLogAt always
+ // works.
+ panic(fmt.Sprintf(_programmerErrorTemplate, err))
+ }
+ return f
+}
+
+// RedirectStdLogAt redirects output from the standard library's package-global
+// logger to the supplied logger at the specified level. Since zap already
+// handles caller annotations, timestamps, etc., it automatically disables the
+// standard library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ return redirectStdLogAt(l, level)
+}
+
+func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ flags := log.Flags()
+ prefix := log.Prefix()
+ log.SetFlags(0)
+ log.SetPrefix("")
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ log.SetOutput(&loggerWriter{logFunc})
+ return func() {
+ log.SetFlags(flags)
+ log.SetPrefix(prefix)
+ log.SetOutput(os.Stderr)
+ }, nil
+}
+
+func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
+ switch lvl {
+ case DebugLevel:
+ return logger.Debug, nil
+ case InfoLevel:
+ return logger.Info, nil
+ case WarnLevel:
+ return logger.Warn, nil
+ case ErrorLevel:
+ return logger.Error, nil
+ case DPanicLevel:
+ return logger.DPanic, nil
+ case PanicLevel:
+ return logger.Panic, nil
+ case FatalLevel:
+ return logger.Fatal, nil
+ }
+ return nil, fmt.Errorf("unrecognized level: %q", lvl)
+}
+
+type loggerWriter struct {
+ logFunc func(msg string, fields ...Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+ p = bytes.TrimSpace(p)
+ l.logFunc(string(p))
+ return len(p), nil
+}
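
A sketch of replacing the global loggers and redirecting the standard library's log package through zap, using the functions defined above (the application wiring is assumed):

package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	undoGlobals := zap.ReplaceGlobals(logger)  // zap.L() and zap.S() now use logger
	defer undoGlobals()
	undoRedirect := zap.RedirectStdLog(logger) // log.Print* now writes at InfoLevel
	defer undoRedirect()

	zap.L().Info("structured global logging")
	zap.S().Infow("sugared global logging", "answer", 42)
	log.Println("standard library output, routed through zap")
}
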
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 0000000..2be8f65
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// # GET
+//
+// The GET request returns a JSON description of the current logging level like:
+//
+// {"level":"info"}
+//
+// # PUT
+//
+// The PUT request changes the logging level. It is perfectly safe to change the
+// logging level while a program is running. Two content types are supported:
+//
+// Content-Type: application/x-www-form-urlencoded
+//
+// With this content type, the level can be provided through the request body or
+// a query parameter. The log level is URL encoded like:
+//
+// level=debug
+//
+// The request body takes precedence over the query parameter, if both are
+// specified.
+//
+// This content type is the default for a curl PUT request. Following are two
+// example curl requests that both set the logging level to debug.
+//
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
+//
+// For any other content type, the payload is expected to be JSON encoded and
+// look like:
+//
+// {"level":"info"}
+//
+// An example curl request could look like this:
+//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if err := lvl.serveHTTP(w, r); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "internal error: %v", err)
+ }
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
+ type errorResponse struct {
+ Error string `json:"error"`
+ }
+ type payload struct {
+ Level zapcore.Level `json:"level"`
+ }
+
+ enc := json.NewEncoder(w)
+
+ switch r.Method {
+ case http.MethodGet:
+ return enc.Encode(payload{Level: lvl.Level()})
+
+ case http.MethodPut:
+ requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return enc.Encode(errorResponse{Error: err.Error()})
+ }
+ lvl.SetLevel(requestedLvl)
+ return enc.Encode(payload{Level: lvl.Level()})
+
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return enc.Encode(errorResponse{
+ Error: "Only GET and PUT are supported.",
+ })
+ }
+}
+
+// Decodes incoming PUT requests and returns the requested logging level.
+func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
+ if contentType == "application/x-www-form-urlencoded" {
+ return decodePutURL(r)
+ }
+ return decodePutJSON(r.Body)
+}
+
+func decodePutURL(r *http.Request) (zapcore.Level, error) {
+ lvl := r.FormValue("level")
+ if lvl == "" {
+ return 0, errors.New("must specify logging level")
+ }
+ var l zapcore.Level
+ if err := l.UnmarshalText([]byte(lvl)); err != nil {
+ return 0, err
+ }
+ return l, nil
+}
+
+func decodePutJSON(body io.Reader) (zapcore.Level, error) {
+ var pld struct {
+ Level *zapcore.Level `json:"level"`
+ }
+ if err := json.NewDecoder(body).Decode(&pld); err != nil {
+ return 0, fmt.Errorf("malformed request body: %v", err)
+ }
+ if pld.Level == nil {
+ return 0, errors.New("must specify logging level")
+ }
+ return *pld.Level, nil
+}
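
A sketch of exposing this endpoint from application code; the /log/level path and port are assumptions chosen to match the curl examples in the doc comment above:

package main

import (
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	level := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	cfg := zap.NewProductionConfig()
	cfg.Level = level
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	mux := http.NewServeMux()
	mux.Handle("/log/level", level) // AtomicLevel is an http.Handler: GET reports, PUT changes

	logger.Info("serving level endpoint on :8080")
	_ = http.ListenAndServe(":8080", mux)
}
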
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 0000000..dad583a
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffer.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+ _pool = buffer.NewPool()
+ // Get retrieves a buffer from the pool, creating one if necessary.
+ Get = _pool.Get
+)
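
The internal pool itself is not importable, but as the package comment notes, the same functionality can be recreated with the public buffer package. A small sketch (the package and function names are illustrative):

package encoderutil

import "go.uber.org/zap/buffer"

var pool = buffer.NewPool()

// Render borrows a pooled buffer, writes the parts into it, and returns the result.
func Render(parts ...string) string {
	buf := pool.Get()
	defer buf.Free() // return the buffer to the pool when done

	for _, p := range parts {
		buf.AppendString(p)
	}
	return buf.String()
}
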
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
new file mode 100644
index 0000000..c4d5d02
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/color.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+ Black Color = iota + 30
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+ return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
new file mode 100644
index 0000000..f673f99
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var _exit = os.Exit
+
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+ Exited bool
+ Code int
+ prev func(code int)
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
+ return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
+func WithStub(f func()) *StubbedExit {
+ s := Stub()
+ defer s.Unstub()
+ f()
+ return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+ _exit = se.prev
+}
+
+func (se *StubbedExit) exit(code int) {
+ se.Exited = true
+ se.Code = code
+}
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
new file mode 100644
index 0000000..40bfed8
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package internal and its subpackages hold types and functionality
+// that are not part of Zap's public API.
+package internal
+
+import "go.uber.org/zap/zapcore"
+
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined so that it can be used conveniently in tests and
+// non-zapcore packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go
new file mode 100644
index 0000000..60e9d2c
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/pool/pool.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package pool provides internal pool utilities.
+package pool
+
+import (
+ "sync"
+)
+
+// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
+// object pooling.
+//
+// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
+// not be detected, so all internal pool use must take care to only store
+// pointer types.
+type Pool[T any] struct {
+ pool sync.Pool
+}
+
+// New returns a new [Pool] for T, and will use fn to construct new Ts when
+// the pool is empty.
+func New[T any](fn func() T) *Pool[T] {
+ return &Pool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ return fn()
+ },
+ },
+ }
+}
+
+// Get gets a T from the pool, or creates a new one if the pool is empty.
+func (p *Pool[T]) Get() T {
+ return p.pool.Get().(T)
+}
+
+// Put returns x into the pool.
+func (p *Pool[T]) Put(x T) {
+ p.pool.Put(x)
+}
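
This package is internal to zap, but the pattern is broadly useful. A sketch of the same strongly-typed wrapper in application code, pooling *bytes.Buffer values (all names are illustrative):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// typedPool mirrors zap's internal pool: a generic wrapper around sync.Pool
// that only ever stores pointer types, so values round-trip without extra copies.
type typedPool[T any] struct{ p sync.Pool }

func newTypedPool[T any](fn func() T) *typedPool[T] {
	return &typedPool[T]{p: sync.Pool{New: func() any { return fn() }}}
}

func (p *typedPool[T]) Get() T  { return p.p.Get().(T) }
func (p *typedPool[T]) Put(x T) { p.p.Put(x) }

var bufPool = newTypedPool(func() *bytes.Buffer { return new(bytes.Buffer) })

func main() {
	buf := bufPool.Get()
	buf.Reset()
	buf.WriteString("pooled")
	fmt.Println(buf.String())
	bufPool.Put(buf)
}
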
diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
new file mode 100644
index 0000000..82af755
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+ "runtime"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+ return &Stack{
+ storage: make([]uintptr, 64),
+ }
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+ // First captures only the first frame.
+ First Depth = iota
+
+ // Full captures the entire call stack, allocating more
+ // storage for it if needed.
+ Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+ stack := _stackPool.Get()
+
+ switch depth {
+ case First:
+ stack.pcs = stack.storage[:1]
+ case Full:
+ stack.pcs = stack.storage
+ }
+
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+	// itself. +2 to skip Capture and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
+
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == Full {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
+ }
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
+ }
+
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+ stack := Capture(skip+1, Full)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := NewFormatter(buffer)
+ stackfmt.FormatStack(stack)
+ return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+ b *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+ return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000..155b208
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sync/atomic"
+
+ "go.uber.org/zap/internal"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel = zapcore.DebugLevel
+ // InfoLevel is the default logging priority.
+ InfoLevel = zapcore.InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel = zapcore.WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel = zapcore.ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel = zapcore.DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel = zapcore.PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+ l *atomic.Int32
+}
+
+var _ internal.LeveledEnabler = AtomicLevel{}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+ lvl := AtomicLevel{l: new(atomic.Int32)}
+ lvl.l.Store(int32(InfoLevel))
+ return lvl
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+ a := NewAtomicLevel()
+ a.SetLevel(l)
+ return a
+}
+
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+ return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+ return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+ lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+ return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+ if lvl.l == nil {
+ lvl.l = &atomic.Int32{}
+ }
+
+ var l zapcore.Level
+ if err := l.UnmarshalText(text); err != nil {
+ return err
+ }
+
+ lvl.SetLevel(l)
+ return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+ return lvl.Level().MarshalText()
+}
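
A sketch of driving a logger's level at runtime with AtomicLevel; the encoder and output choices below are assumptions for illustration:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	level, err := zap.ParseAtomicLevel("warn") // e.g. read from config or an env var
	if err != nil {
		level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
	}

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		level, // AtomicLevel satisfies zapcore.LevelEnabler
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("suppressed while the level is warn")
	level.SetLevel(zapcore.DebugLevel) // takes effect immediately, safe for concurrent use
	logger.Debug("now visible")
}
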
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 0000000..c4d3003
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,435 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/stacktrace"
+ "go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+ core zapcore.Core
+
+ development bool
+ addCaller bool
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
+
+ name string
+ errorOutput zapcore.WriteSyncer
+
+ addStack zapcore.LevelEnabler
+
+ callerSkip int
+
+ clock zapcore.Clock
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+ if core == nil {
+ return NewNop()
+ }
+ log := &Logger{
+ core: core,
+ errorOutput: zapcore.Lock(os.Stderr),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+ return log.WithOptions(options...)
+}
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+ return &Logger{
+ core: zapcore.NewNopCore(),
+ errorOutput: zapcore.AddSync(io.Discard),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+ return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+ return NewDevelopmentConfig().Build(options...)
+}
+
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+ encoderCfg := zapcore.EncoderConfig{
+ MessageKey: "msg",
+ LevelKey: "level",
+ NameKey: "logger",
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ }
+ core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+ return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+ core := log.clone()
+ core.callerSkip += 2
+ return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+ if s == "" {
+ return log
+ }
+ l := log.clone()
+ if log.name == "" {
+ l.name = s
+ } else {
+ l.name = strings.Join([]string{l.name, s}, ".")
+ }
+ return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+ c := log.clone()
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+ return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
+func (log *Logger) With(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ l := log.clone()
+ l.core = l.core.With(fields)
+ return l
+}
+
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewLazyWith(core, fields)
+ }))
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ return log.check(lvl, msg)
+}
+
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+ if ce := log.check(DebugLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+ if ce := log.check(InfoLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+ if ce := log.check(WarnLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+ if ce := log.check(ErrorLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+ if ce := log.check(DPanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+ if ce := log.check(PanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+ if ce := log.check(FatalLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+ return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+ return log.core
+}
+
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+ return log.name
+}
+
+func (log *Logger) clone() *Logger {
+ clone := *log
+ return &clone
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
+ const callerSkipOffset = 2
+
+ // Check the level first to reduce the cost of disabled log calls.
+ // Since Panic and higher may exit, we skip the optimization for those levels.
+ if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+ return nil
+ }
+
+ // Create basic checked entry thru the core; this will be non-nil if the
+ // log message will actually be written somewhere.
+ ent := zapcore.Entry{
+ LoggerName: log.name,
+ Time: log.clock.Now(),
+ Level: lvl,
+ Message: msg,
+ }
+ ce := log.core.Check(ent, nil)
+ willWrite := ce != nil
+
+ // Set up any required terminal behavior.
+ switch ent.Level {
+ case zapcore.PanicLevel:
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
+ case zapcore.FatalLevel:
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
+ case zapcore.DPanicLevel:
+ if log.development {
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
+ }
+ }
+
+ // Only do further annotation if we're going to write this message; checked
+ // entries that exist only for terminal behavior don't benefit from
+ // annotation.
+ if !willWrite {
+ return ce
+ }
+
+ // Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput
+
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktrace.First
+ if addStack {
+ stackDepth = stacktrace.Full
+ }
+ stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ _ = log.errorOutput.Sync()
+ }
+ return ce
+ }
+
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
+ PC: frame.PC,
+ File: frame.File,
+ Line: frame.Line,
+ Function: frame.Function,
+ }
+ }
+
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := stacktrace.NewFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
+ }
+
+ return ce
+}
+
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
+ }
+ return override
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 0000000..43d357a
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,182 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// An Option configures a Logger.
+type Option interface {
+ apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = f(log.core)
+ })
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = zapcore.RegisterHooks(log.core, hooks...)
+ })
+}
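+
+// An illustrative sketch (not part of upstream zap): a hook that counts
+// emitted entries, where core is any zapcore.Core and count is a
+// sync/atomic Int64 (Go 1.19+):
+//
+//	var count atomic.Int64
+//	logger := zap.New(core, zap.Hooks(func(zapcore.Entry) error {
+//		count.Add(1)
+//		return nil
+//	}))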
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = log.core.With(fs)
+ })
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+ return optionFunc(func(log *Logger) {
+ log.errorOutput = w
+ })
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+ return optionFunc(func(log *Logger) {
+ log.development = true
+ })
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+ return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+ return optionFunc(func(log *Logger) {
+ log.addCaller = enabled
+ })
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+ return optionFunc(func(log *Logger) {
+ log.callerSkip += skip
+ })
+}
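+
+// A minimal sketch (not upstream documentation): a helper that forwards to
+// the Logger skips its own frame so the helper's caller is reported instead;
+// logError is a hypothetical wrapper.
+//
+//	func logError(l *zap.Logger, msg string) {
+//		l.WithOptions(zap.AddCallerSkip(1)).Error(msg)
+//	}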
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ log.addStack = lvl
+ })
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed-in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+ if err != nil {
+ fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+ } else {
+ log.core = core
+ }
+ })
+}
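+
+// For example (an illustrative sketch, not upstream documentation), a
+// debug-level logger handed to a noisy subsystem can be restricted to
+// warnings and above; asking for a lower level than the parent is a no-op:
+//
+//	quiet := logger.WithOptions(zap.IncreaseLevel(zap.WarnLevel))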
+
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onPanic = hook
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = hook
+ })
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+ return optionFunc(func(log *Logger) {
+ log.clock = clock
+ })
+}
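+
+// A hedged sketch (not upstream code) of a fixed clock for deterministic
+// tests; constClock and fixedTime are hypothetical names:
+//
+//	type constClock struct{ t time.Time }
+//
+//	func (c constClock) Now() time.Time { return c.t }
+//	func (c constClock) NewTicker(d time.Duration) *time.Ticker {
+//		return time.NewTicker(d)
+//	}
+//
+//	logger := zap.New(core, zap.WithClock(constClock{t: fixedTime}))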
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000..499772a
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var _sinkRegistry = newSinkRegistry()
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+ zapcore.WriteSyncer
+ io.Closer
+}
+
+type errSinkNotFound struct {
+ scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+ return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ // Infallible operation: the registry is empty, so we can't have a conflict.
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specified scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
+
+ if scheme == "" {
+ return errors.New("can't register a sink factory for empty string")
+ }
+ normalized, err := normalizeScheme(scheme)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+ }
+ if _, ok := sr.factories[normalized]; ok {
+ return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+ }
+ sr.factories[normalized] = factory
+ return nil
+}
+
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+ }
+ if u.Scheme == "" {
+ u.Scheme = schemeFile
+ }
+
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
+ if !ok {
+ return nil, &errSinkNotFound{u.Scheme}
+ }
+ return factory(u)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
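+
+// As an illustrative sketch (not upstream documentation), a factory for a
+// hypothetical "memory" scheme can capture output in tests; memSink is
+// assumed to implement Sink (Write, Sync, and Close):
+//
+//	zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
+//		return &memSink{}, nil
+//	})
+//	cfg := zap.NewProductionConfig()
+//	cfg.OutputPaths = []string{"memory://"}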
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
+ if u.User != nil {
+ return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+ }
+ if u.Fragment != "" {
+ return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+ }
+ if u.RawQuery != "" {
+ return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+ }
+ // Error messages are better if we check hostname and port separately.
+ if u.Port() != "" {
+ return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+ }
+ if hn := u.Hostname(); hn != "" && hn != "localhost" {
+ return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+ }
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
+ case "stdout":
+ return nopCloserSink{os.Stdout}, nil
+ case "stderr":
+ return nopCloserSink{os.Stderr}, nil
+ }
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
+}
+
+func normalizeScheme(s string) (string, error) {
+ // https://tools.ietf.org/html/rfc3986#section-3.1
+ s = strings.ToLower(s)
+ if first := s[0]; 'a' > first || 'z' < first {
+ return "", errors.New("must start with a letter")
+ }
+ for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+ c := s[i]
+ switch {
+ case 'a' <= c && c <= 'z':
+ continue
+ case '0' <= c && c <= '9':
+ continue
+ case c == '.' || c == '+' || c == '-':
+ continue
+ }
+ return "", fmt.Errorf("may not contain %q", c)
+ }
+ return s, nil
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000..8904cd0
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,476 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ _oddNumberErrMsg = "Ignored key without a value."
+ _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
+type SugaredLogger struct {
+ base *Logger
+}
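+
+// A brief usage sketch (illustrative only; sugar is any *SugaredLogger and
+// addr is a placeholder):
+//
+//	sugar.Info("connected")                  // Print-style
+//	sugar.Infof("connected to %s", addr)     // Printf-style
+//	sugar.Infow("connected", "addr", addr)   // structured ("info with")
+//	sugar.Infoln("connected to", addr)       // Println-style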
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+ base := s.base.clone()
+ base.callerSkip -= 2
+ return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+ return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
+// is the equivalent of
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at the provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+ s.log(DebugLevel, "", args, nil)
+}
+
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Info(args ...interface{}) {
+ s.log(InfoLevel, "", args, nil)
+}
+
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+ s.log(WarnLevel, "", args, nil)
+}
+
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Error(args ...interface{}) {
+ s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+ s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+ s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+ s.log(FatalLevel, "", args, nil)
+}
+
+// Logf formats the message according to the format specifier
+// and logs it at the provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+ s.log(DebugLevel, template, args, nil)
+}
+
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+ s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+ s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+ s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+ s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf formats the message according to the format specifier
+// and panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+ s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+ s.log(FatalLevel, template, args, nil)
+}
+
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+//
+// s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+ s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+ s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+ s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+ s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+ s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+ s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+ s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Logln logs a message at the provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+ return s.base.Sync()
+}
+
+// log message with Sprint, Sprintf, or neither.
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+ // If logging at this level is completely disabled, skip the overhead of
+ // string formatting.
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessage(template, fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// getMessage format with Sprint, Sprintf, or neither.
+func getMessage(template string, fmtArgs []interface{}) string {
+ if len(fmtArgs) == 0 {
+ return template
+ }
+
+ if template != "" {
+ return fmt.Sprintf(template, fmtArgs...)
+ }
+
+ if len(fmtArgs) == 1 {
+ if str, ok := fmtArgs[0].(string); ok {
+ return str
+ }
+ }
+ return fmt.Sprint(fmtArgs...)
+}
+
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
+ if len(args) == 0 {
+ return nil
+ }
+
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
+
+ for i := 0; i < len(args); {
+ // This is a strongly-typed field. Consume it and move on.
+ if f, ok := args[i].(Field); ok {
+ fields = append(fields, f)
+ i++
+ continue
+ }
+
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
+ // Make sure this element isn't a dangling key.
+ if i == len(args)-1 {
+ s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
+ break
+ }
+
+ // Consume this value and the next, treating them as a key-value pair. If the
+ // key isn't a string, add this pair to the slice of invalid pairs.
+ key, val := args[i], args[i+1]
+ if keyStr, ok := key.(string); !ok {
+ // Subsequent errors are likely, so allocate once up front.
+ if cap(invalid) == 0 {
+ invalid = make(invalidPairs, 0, len(args)/2)
+ }
+ invalid = append(invalid, invalidPair{i, key, val})
+ } else {
+ fields = append(fields, Any(keyStr, val))
+ }
+ i += 2
+ }
+
+ // If we encountered any invalid key-value pairs, log an error.
+ if len(invalid) > 0 {
+ s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
+ }
+ return fields
+}
+
+type invalidPair struct {
+ position int
+ key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddInt64("position", int64(p.position))
+ Any("key", p.key).AddTo(enc)
+ Any("value", p.value).AddTo(enc)
+ return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ var err error
+ for i := range ps {
+ err = multierr.Append(err, enc.AppendObject(ps[i]))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
new file mode 100644
index 0000000..c5a1f16
--- /dev/null
+++ b/vendor/go.uber.org/zap/time.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
+}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
new file mode 100644
index 0000000..06768c6
--- /dev/null
+++ b/vendor/go.uber.org/zap/writer.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of URLs, opens or
+// creates each of the specified resources, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
+// scheme and URLs with the "file" scheme. Third-party code may register
+// factories for other schemes using RegisterSink.
+//
+// URLs with the "file" scheme must use absolute paths on the local
+// filesystem. No user, password, port, fragments, or query parameters are
+// allowed, and the hostname must be empty or "localhost".
+//
+// Since it's common to write logs to the local filesystem, URLs without a
+// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
+// a scheme, the special paths "stdout" and "stderr" are interpreted as
+// os.Stdout and os.Stderr. When specified without a scheme, relative file
+// paths also work.
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+ writers, closeAll, err := open(paths)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ writer := CombineWriteSyncers(writers...)
+ return writer, closeAll, nil
+}
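+
+// A minimal usage sketch (the paths and enc are placeholders, not from
+// upstream documentation):
+//
+//	ws, closeAll, err := zap.Open("stdout", "/var/log/app.log")
+//	if err != nil {
+//		return err
+//	}
+//	defer closeAll()
+//	core := zapcore.NewCore(enc, ws, zap.InfoLevel)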
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+ writers := make([]zapcore.WriteSyncer, 0, len(paths))
+ closers := make([]io.Closer, 0, len(paths))
+ closeAll := func() {
+ for _, c := range closers {
+ _ = c.Close()
+ }
+ }
+
+ var openErr error
+ for _, path := range paths {
+ sink, err := _sinkRegistry.newSink(path)
+ if err != nil {
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
+ continue
+ }
+ writers = append(writers, sink)
+ closers = append(closers, sink)
+ }
+ if openErr != nil {
+ closeAll()
+ return nil, nil, openErr
+ }
+
+ return writers, closeAll, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+ if len(writers) == 0 {
+ return zapcore.AddSync(io.Discard)
+ }
+ return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
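+
+// As a short sketch (ws1 and ws2 stand for any WriteSyncers), the call
+//
+//	combined := zap.CombineWriteSyncers(ws1, ws2)
+//
+// behaves the same as zapcore.Lock(zapcore.NewMultiWriteSyncer(ws1, ws2)).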
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 0000000..a40e93b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,219 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ // _defaultBufferSize specifies the default size used by BufferedWriteSyncer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+ // BufferedWriteSyncer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+ // Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer directly; multiple Write calls are batched,
+// and the buffered data is flushed to the underlying WriteSyncer when the buffer
+// fills or at the configured flush interval, whichever comes first.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid partial writes from being flushed, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+ // Ignore the error here: the underlying bufio.Writer records it,
+ // and it will be surfaced by the Sync call made during Stop.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000..422fd82
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+ // NewTicker returns *time.Ticker that holds a channel
+ // that delivers "ticks" of a clock.
+ NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements default Clock that uses system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000..cc2b4e0
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
+ return &sliceArrayEncoder{
+ elems: make([]interface{}, 0, 2),
+ }
+})
+
+func getSliceEncoder() *sliceArrayEncoder {
+ return _sliceEncoderPool.Get()
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+ e.elems = e.elems[:0]
+ _sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+ *jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human -
+// rather than machine - consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+ if cfg.ConsoleSeparator == "" {
+ // Use a default delimiter of '\t' for backwards compatibility
+ cfg.ConsoleSeparator = "\t"
+ }
+ return consoleEncoder{newJSONEncoder(cfg, true)}
+}
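+
+// A hedged configuration sketch (not from upstream docs); the separator
+// value is arbitrary:
+//
+//	encCfg := zap.NewDevelopmentEncoderConfig()
+//	encCfg.ConsoleSeparator = " | "
+//	enc := zapcore.NewConsoleEncoder(encCfg)
+//	core := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.DebugLevel)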
+
+func (c consoleEncoder) Clone() Encoder {
+ return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ line := bufferpool.Get()
+
+ // We don't want the entry's metadata to be quoted and escaped (if it's
+ // encoded as strings), which means that we can't use the JSON encoder. The
+ // simplest option is to use the memory encoder and fmt.Fprint.
+ //
+ // If this ever becomes a performance bottleneck, we can implement
+ // ArrayEncoder for our plain-text format.
+ arr := getSliceEncoder()
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
+ c.EncodeTime(ent.Time, arr)
+ }
+ if c.LevelKey != "" && c.EncodeLevel != nil {
+ c.EncodeLevel(ent.Level, arr)
+ }
+ if ent.LoggerName != "" && c.NameKey != "" {
+ nameEncoder := c.EncodeName
+
+ if nameEncoder == nil {
+ // Fall back to FullNameEncoder for backward compatibility.
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, arr)
+ }
+ if ent.Caller.Defined {
+ if c.CallerKey != "" && c.EncodeCaller != nil {
+ c.EncodeCaller(ent.Caller, arr)
+ }
+ if c.FunctionKey != "" {
+ arr.AppendString(ent.Caller.Function)
+ }
+ }
+ for i := range arr.elems {
+ if i > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+ fmt.Fprint(line, arr.elems[i])
+ }
+ putSliceEncoder(arr)
+
+ // Add the message itself.
+ if c.MessageKey != "" {
+ c.addSeparatorIfNecessary(line)
+ line.AppendString(ent.Message)
+ }
+
+ // Add any structured context.
+ c.writeContext(line, fields)
+
+ // If there's no stacktrace key, honor that; this allows users to force
+ // single-line output.
+ if ent.Stack != "" && c.StacktraceKey != "" {
+ line.AppendByte('\n')
+ line.AppendString(ent.Stack)
+ }
+
+ line.AppendString(c.LineEnding)
+ return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+ context := c.jsonEncoder.Clone().(*jsonEncoder)
+ defer func() {
+ // putJSONEncoder assumes the buffer is still used, but we write out the buffer so
+ // we can free it.
+ context.buf.Free()
+ putJSONEncoder(context)
+ }()
+
+ addFields(context, extra)
+ context.closeOpenNamespaces()
+ if context.buf.Len() == 0 {
+ return
+ }
+
+ c.addSeparatorIfNecessary(line)
+ line.AppendByte('{')
+ line.Write(context.buf.Bytes())
+ line.AppendByte('}')
+}
+
+func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) {
+ if line.Len() > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
new file mode 100644
index 0000000..776e93f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// Core is a minimal, fast logger interface. It's designed for library authors
+// to wrap in a more user-friendly API.
+type Core interface {
+ LevelEnabler
+
+ // With adds structured context to the Core.
+ With([]Field) Core
+ // Check determines whether the supplied Entry should be logged (using the
+ // embedded LevelEnabler and possibly some extra logic). If the entry
+ // should be logged, the Core adds itself to the CheckedEntry and returns
+ // the result.
+ //
+ // Callers must use Check before calling Write.
+ Check(Entry, *CheckedEntry) *CheckedEntry
+ // Write serializes the Entry and any Fields supplied at the log site and
+ // writes them to their destination.
+ //
+ // If called, Write should always log the Entry and Fields; it should not
+ // replicate the logic of Check.
+ Write(Entry, []Field) error
+ // Sync flushes buffered logs (if any).
+ Sync() error
+}
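+
+// A sketch of the Check-then-Write contract described above (ent and
+// fields are placeholders):
+//
+//	if ce := core.Check(ent, nil); ce != nil {
+//		ce.Write(fields...)
+//	}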
+
+type nopCore struct{}
+
+// NewNopCore returns a no-op Core.
+func NewNopCore() Core { return nopCore{} }
+func (nopCore) Enabled(Level) bool { return false }
+func (n nopCore) With([]Field) Core { return n }
+func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
+func (nopCore) Write(Entry, []Field) error { return nil }
+func (nopCore) Sync() error { return nil }
+
+// NewCore creates a Core that writes logs to a WriteSyncer.
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
+ return &ioCore{
+ LevelEnabler: enab,
+ enc: enc,
+ out: ws,
+ }
+}
+
+type ioCore struct {
+ LevelEnabler
+ enc Encoder
+ out WriteSyncer
+}
+
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
+func (c *ioCore) With(fields []Field) Core {
+ clone := c.clone()
+ addFields(clone.enc, fields)
+ return clone
+}
+
+func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if c.Enabled(ent.Level) {
+ return ce.AddCore(ent, c)
+ }
+ return ce
+}
+
+func (c *ioCore) Write(ent Entry, fields []Field) error {
+ buf, err := c.enc.EncodeEntry(ent, fields)
+ if err != nil {
+ return err
+ }
+ _, err = c.out.Write(buf.Bytes())
+ buf.Free()
+ if err != nil {
+ return err
+ }
+ if ent.Level > ErrorLevel {
+ // Since we may be crashing the program, sync the output.
+ // Ignore Sync errors, pending a clean solution to issue #370.
+ _ = c.Sync()
+ }
+ return nil
+}
+
+func (c *ioCore) Sync() error {
+ return c.out.Sync()
+}
+
+func (c *ioCore) clone() *ioCore {
+ return &ioCore{
+ LevelEnabler: c.LevelEnabler,
+ enc: c.enc.Clone(),
+ out: c.out,
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
new file mode 100644
index 0000000..31000e9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapcore defines and implements the low-level interfaces upon which
+// zap is built. By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000..0446254
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,466 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+
+ "go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToLowercaseColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.String())
+ }
+ enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToCapitalColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.CapitalString())
+ }
+ enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "capital":
+ *e = CapitalLevelEncoder
+ case "capitalColor":
+ *e = CapitalColorLevelEncoder
+ case "color":
+ *e = LowercaseColorLevelEncoder
+ default:
+ *e = LowercaseLevelEncoder
+ }
+ return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ sec := float64(nanos) / float64(time.Second)
+ enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ millis := float64(nanos) / float64(time.Millisecond)
+ enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+ type appendTimeEncoder interface {
+ AppendTimeLayout(time.Time, string)
+ }
+
+ if enc, ok := enc.(appendTimeEncoder); ok {
+ enc.AppendTimeLayout(t, layout)
+ return
+ }
+
+ enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder which serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+ return func(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, layout, enc)
+ }
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "rfc3339nano", "RFC3339Nano":
+ *e = RFC3339NanoTimeEncoder
+ case "rfc3339", "RFC3339":
+ *e = RFC3339TimeEncoder
+ case "iso8601", "ISO8601":
+ *e = ISO8601TimeEncoder
+ case "millis":
+ *e = EpochMillisTimeEncoder
+ case "nanos":
+ *e = EpochNanosTimeEncoder
+ default:
+ *e = EpochTimeEncoder
+ }
+ return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it is unmarshaled to a TimeEncoder with the given layout.
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
+// If the value is a string, it uses UnmarshalText.
+//
+// timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var o struct {
+ Layout string `json:"layout" yaml:"layout"`
+ }
+ if err := unmarshal(&o); err == nil {
+ *e = TimeEncoderOfLayout(o.Layout)
+ return nil
+ }
+
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+ return e.UnmarshalYAML(func(v interface{}) error {
+ return json.Unmarshal(data, v)
+ })
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, and anything else is unmarshaled to
+// NanosDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "string":
+ *e = StringDurationEncoder
+ case "nanos":
+ *e = NanosDurationEncoder
+ case "ms":
+ *e = MillisDurationEncoder
+ default:
+ *e = SecondsDurationEncoder
+ }
+ return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullCallerEncoder
+ default:
+ *e = ShortCallerEncoder
+ }
+ return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+ enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullNameEncoder
+ default:
+ *e = FullNameEncoder
+ }
+ return nil
+}
+
+// An EncoderConfig allows users to configure the concrete encoders supplied by
+// zapcore.
+type EncoderConfig struct {
+ // Set the keys used for each log entry. If any key is empty, that portion
+ // of the entry is omitted.
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ // Configure the primitive representations of common complex types. For
+ // example, some users may want all time.Times serialized as floating-point
+ // seconds since epoch, while others may prefer ISO8601 strings.
+ EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
+ EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
+ EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
+ EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
+ // Unlike the other primitive type encoders, EncodeName is optional. The
+ // zero value falls back to FullNameEncoder.
+ EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
+ // Configures the field separator used by the console encoder. Defaults
+ // to tab.
+ ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
+}
+
+// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
+// map- or struct-like object to the logging context. Like maps, ObjectEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ObjectEncoder interface {
+ // Logging-specific marshalers.
+ AddArray(key string, marshaler ArrayMarshaler) error
+ AddObject(key string, marshaler ObjectMarshaler) error
+
+ // Built-in types.
+ AddBinary(key string, value []byte) // for arbitrary bytes
+ AddByteString(key string, value []byte) // for UTF-8 encoded bytes
+ AddBool(key string, value bool)
+ AddComplex128(key string, value complex128)
+ AddComplex64(key string, value complex64)
+ AddDuration(key string, value time.Duration)
+ AddFloat64(key string, value float64)
+ AddFloat32(key string, value float32)
+ AddInt(key string, value int)
+ AddInt64(key string, value int64)
+ AddInt32(key string, value int32)
+ AddInt16(key string, value int16)
+ AddInt8(key string, value int8)
+ AddString(key, value string)
+ AddTime(key string, value time.Time)
+ AddUint(key string, value uint)
+ AddUint64(key string, value uint64)
+ AddUint32(key string, value uint32)
+ AddUint16(key string, value uint16)
+ AddUint8(key string, value uint8)
+ AddUintptr(key string, value uintptr)
+
+ // AddReflected uses reflection to serialize arbitrary objects, so it can be
+ // slow and allocation-heavy.
+ AddReflected(key string, value interface{}) error
+ // OpenNamespace opens an isolated namespace where all subsequent fields will
+ // be added. Applications can use namespaces to prevent key collisions when
+ // injecting loggers into sub-components or third-party libraries.
+ OpenNamespace(key string)
+}
+
+// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
+// array-like objects to the logging context. Of note, it supports mixed-type
+// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ArrayEncoder interface {
+ // Built-in types.
+ PrimitiveArrayEncoder
+
+ // Time-related types.
+ AppendDuration(time.Duration)
+ AppendTime(time.Time)
+
+ // Logging-specific marshalers.
+ AppendArray(ArrayMarshaler) error
+ AppendObject(ObjectMarshaler) error
+
+ // AppendReflected uses reflection to serialize arbitrary objects, so it's
+ // slow and allocation-heavy.
+ AppendReflected(value interface{}) error
+}
+
+// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
+// only in Go's built-in types. It's included only so that Duration- and
+// TimeEncoders cannot trigger infinite recursion.
+type PrimitiveArrayEncoder interface {
+ // Built-in types.
+ AppendBool(bool)
+ AppendByteString([]byte) // for UTF-8 encoded bytes
+ AppendComplex128(complex128)
+ AppendComplex64(complex64)
+ AppendFloat64(float64)
+ AppendFloat32(float32)
+ AppendInt(int)
+ AppendInt64(int64)
+ AppendInt32(int32)
+ AppendInt16(int16)
+ AppendInt8(int8)
+ AppendString(string)
+ AppendUint(uint)
+ AppendUint64(uint64)
+ AppendUint32(uint32)
+ AppendUint16(uint16)
+ AppendUint8(uint8)
+ AppendUintptr(uintptr)
+}
+
+// Encoder is a format-agnostic interface for all log entry marshalers. Since
+// log encoders don't need to support the same wide range of use cases as
+// general-purpose marshalers, it's possible to make them faster and
+// lower-allocation.
+//
+// Implementations of the ObjectEncoder interface's methods can, of course,
+// freely modify the receiver. However, the Clone and EncodeEntry methods will
+// be called concurrently and shouldn't modify the receiver.
+type Encoder interface {
+ ObjectEncoder
+
+ // Clone copies the encoder, ensuring that adding fields to the copy doesn't
+ // affect the original.
+ Clone() Encoder
+
+ // EncodeEntry encodes an entry and fields, along with any accumulated
+ // context, into a byte buffer and returns it. Any fields that are empty,
+ // including fields on the `Entry` type, should be omitted.
+ EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
+}
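For orientation, the following is a minimal sketch (not part of the vendored file) of wiring the encoders above into an EncoderConfig by hand; the key names and level are illustrative, and the comments show the text forms accepted by the UnmarshalText methods.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		CallerKey:      "caller",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.CapitalLevelEncoder,   // text form: "capital"
		EncodeTime:     zapcore.ISO8601TimeEncoder,    // text form: "iso8601"
		EncodeDuration: zapcore.StringDurationEncoder, // text form: "string"
		EncodeCaller:   zapcore.ShortCallerEncoder,    // any text other than "full"
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()
	logger.Info("encoder configured")
}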
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
new file mode 100644
index 0000000..459a5d7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "time"
+
+ "go.uber.org/multierr"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/exit"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _cePool = pool.New(func() *CheckedEntry {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+})
+
+func getCheckedEntry() *CheckedEntry {
+ ce := _cePool.Get()
+ ce.reset()
+ return ce
+}
+
+func putCheckedEntry(ce *CheckedEntry) {
+ if ce == nil {
+ return
+ }
+ _cePool.Put(ce)
+}
+
+// NewEntryCaller makes an EntryCaller from the return signature of
+// runtime.Caller.
+func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
+ if !ok {
+ return EntryCaller{}
+ }
+ return EntryCaller{
+ PC: pc,
+ File: file,
+ Line: line,
+ Defined: true,
+ }
+}
+
+// EntryCaller represents the caller of a logging function.
+type EntryCaller struct {
+ Defined bool
+ PC uintptr
+ File string
+ Line int
+ Function string
+}
+
+// String returns the full path and line number of the caller.
+func (ec EntryCaller) String() string {
+ return ec.FullPath()
+}
+
+// FullPath returns a /full/path/to/package/file:line description of the
+// caller.
+func (ec EntryCaller) FullPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ buf := bufferpool.Get()
+ buf.AppendString(ec.File)
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// TrimmedPath returns a package/file:line description of the caller,
+// preserving only the leaf directory name and file name.
+func (ec EntryCaller) TrimmedPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+ //
+ // Find the last separator.
+ //
+ idx := strings.LastIndexByte(ec.File, '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(ec.File[:idx], '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ buf := bufferpool.Get()
+ // Keep everything after the penultimate separator.
+ buf.AppendString(ec.File[idx+1:])
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// An Entry represents a complete log message. The entry's structured context
+// is already serialized, but the log level, time, message, and call site
+// information are available for inspection and modification. Any fields left
+// empty will be omitted when encoding.
+//
+// Entries are pooled, so any functions that accept them MUST be careful not to
+// retain references to them.
+type Entry struct {
+ Level Level
+ Time time.Time
+ LoggerName string
+ Message string
+ Caller EntryCaller
+ Stack string
+}
+
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
+
+// CheckWriteAction indicates what action to take after a log entry is
+// processed. Actions are ordered in increasing severity.
+type CheckWriteAction uint8
+
+const (
+ // WriteThenNoop indicates that nothing special needs to be done. It's the
+ // default behavior.
+ WriteThenNoop CheckWriteAction = iota
+ // WriteThenGoexit runs runtime.Goexit after Write.
+ WriteThenGoexit
+ // WriteThenPanic causes a panic after Write.
+ WriteThenPanic
+ // WriteThenFatal causes an os.Exit(1) after Write.
+ WriteThenFatal
+)
+
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
+// CheckedEntry is an Entry together with a collection of Cores that have
+// already agreed to log it.
+//
+// CheckedEntry references should be created by calling AddCore or After on a
+// nil *CheckedEntry. References are returned to a pool after Write, and MUST
+// NOT be retained after calling their Write method.
+type CheckedEntry struct {
+ Entry
+ ErrorOutput WriteSyncer
+ dirty bool // best-effort detection of pool misuse
+ after CheckWriteHook
+ cores []Core
+}
+
+func (ce *CheckedEntry) reset() {
+ ce.Entry = Entry{}
+ ce.ErrorOutput = nil
+ ce.dirty = false
+ ce.after = nil
+ for i := range ce.cores {
+ // don't keep references to cores
+ ce.cores[i] = nil
+ }
+ ce.cores = ce.cores[:0]
+}
+
+// Write writes the entry to the stored Cores, returns any errors, and returns
+// the CheckedEntry reference to a pool for immediate re-use. Finally, it
+// executes any required CheckWriteAction.
+func (ce *CheckedEntry) Write(fields ...Field) {
+ if ce == nil {
+ return
+ }
+
+ if ce.dirty {
+ if ce.ErrorOutput != nil {
+ // Make a best effort to detect unsafe re-use of this CheckedEntry.
+ // If the entry is dirty, log an internal error; because the
+ // CheckedEntry is being used after it was returned to the pool,
+ // the message may be an amalgamation from multiple call sites.
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
+ _ = ce.ErrorOutput.Sync() // ignore error
+ }
+ return
+ }
+ ce.dirty = true
+
+ var err error
+ for i := range ce.cores {
+ err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
+ }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ _ = ce.ErrorOutput.Sync() // ignore error
+ }
+
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
+ }
+ putCheckedEntry(ce)
+}
+
+// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
+// used by Core.Check implementations, and is safe to call on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.cores = append(ce.cores, core)
+ return ce
+}
+
+// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
+// Core will panic or fatal after writing this log entry. Like AddCore, it's
+// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.after = hook
+ return ce
+}
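A short sketch (assumed usage, not vendored source) of the Check/Write flow that produces and consumes the pooled CheckedEntry values documented above; buildPayload is a hypothetical expensive helper.

package main

import "go.uber.org/zap"

func buildPayload() string {
	// Hypothetical expensive computation worth skipping when the level is disabled.
	return "payload"
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// Check returns a non-nil *CheckedEntry only if at least one Core agreed
	// to log at this level; the entry must be written exactly once and never
	// retained afterwards, since Write returns it to the pool.
	if ce := logger.Check(zap.DebugLevel, "expensive debug detail"); ce != nil {
		ce.Write(zap.String("payload", buildPayload()))
	}
}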
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 0000000..c40df13
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,136 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.uber.org/zap/internal/pool"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the Error() method
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are an
+ // error that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "<nil>")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", rerr)
+ }
+ }()
+
+ basic := err.Error()
+ enc.AddString(key, basic)
+
+ switch e := err.(type) {
+ case errorGroup:
+ return enc.AddArray(key+"Causes", errArray(e.Errors()))
+ case fmt.Formatter:
+ verbose := fmt.Sprintf("%+v", e)
+ if verbose != basic {
+ // This is a rich error type, like those produced by
+ // github.com/pkg/errors.
+ enc.AddString(key+"Verbose", verbose)
+ }
+ }
+ return nil
+}
+
+type errorGroup interface {
+ // Provides read-only access to the underlying list of errors, preferably
+ // without causing any allocs.
+ Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+
+ el := newErrArrayElem(errs[i])
+ err := arr.AppendObject(el)
+ el.Free()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _errArrayElemPool = pool.New(func() *errArrayElem {
+ return &errArrayElem{}
+})
+
+// Encodes any error into a {"error": ...} re-using the same errors logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+ e := _errArrayElemPool.Get()
+ e.err = err
+ return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+ return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+ return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+ e.err = nil
+ _errArrayElemPool.Put(e)
+}
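Roughly how the error-encoding logic above surfaces in log output when a multi-error is logged through zap.Error (a sketch; the exact JSON depends on the configured encoder):

package main

import (
	"errors"

	"go.uber.org/multierr"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	err := multierr.Combine(
		errors.New("disk full"),
		errors.New("permission denied"),
	)

	// Because the combined error implements Errors() []error, the encoder adds
	// an "errorCauses" array alongside the plain "error" message; errors that
	// implement fmt.Formatter instead get an "errorVerbose" field.
	logger.Error("cleanup failed", zap.Error(err))
}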
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000..308c978
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+ // UnknownType is the default field type. Attempting to add it to an encoder will panic.
+ UnknownType FieldType = iota
+ // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+ ArrayMarshalerType
+ // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+ ObjectMarshalerType
+ // BinaryType indicates that the field carries an opaque binary blob.
+ BinaryType
+ // BoolType indicates that the field carries a bool.
+ BoolType
+ // ByteStringType indicates that the field carries UTF-8 encoded bytes.
+ ByteStringType
+ // Complex128Type indicates that the field carries a complex128.
+ Complex128Type
+ // Complex64Type indicates that the field carries a complex64.
+ Complex64Type
+ // DurationType indicates that the field carries a time.Duration.
+ DurationType
+ // Float64Type indicates that the field carries a float64.
+ Float64Type
+ // Float32Type indicates that the field carries a float32.
+ Float32Type
+ // Int64Type indicates that the field carries an int64.
+ Int64Type
+ // Int32Type indicates that the field carries an int32.
+ Int32Type
+ // Int16Type indicates that the field carries an int16.
+ Int16Type
+ // Int8Type indicates that the field carries an int8.
+ Int8Type
+ // StringType indicates that the field carries a string.
+ StringType
+ // TimeType indicates that the field carries a time.Time that is
+ // representable by a UnixNano() stored as an int64.
+ TimeType
+ // TimeFullType indicates that the field carries a time.Time stored as-is.
+ TimeFullType
+ // Uint64Type indicates that the field carries a uint64.
+ Uint64Type
+ // Uint32Type indicates that the field carries a uint32.
+ Uint32Type
+ // Uint16Type indicates that the field carries a uint16.
+ Uint16Type
+ // Uint8Type indicates that the field carries a uint8.
+ Uint8Type
+ // UintptrType indicates that the field carries a uintptr.
+ UintptrType
+ // ReflectType indicates that the field carries an interface{}, which should
+ // be serialized using reflection.
+ ReflectType
+ // NamespaceType signals the beginning of an isolated namespace. All
+ // subsequent fields should be added to the new namespace.
+ NamespaceType
+ // StringerType indicates that the field carries a fmt.Stringer.
+ StringerType
+ // ErrorType indicates that the field carries an error.
+ ErrorType
+ // SkipType indicates that the field is a no-op.
+ SkipType
+
+ // InlineMarshalerType indicates that the field carries an ObjectMarshaler
+ // that should be inlined.
+ InlineMarshalerType
+)
+
+// A Field is a marshaling operation used to add a key-value pair to a logger's
+// context. Most fields are lazily marshaled, so it's inexpensive to add fields
+// to disabled debug-level log statements.
+type Field struct {
+ Key string
+ Type FieldType
+ Integer int64
+ String string
+ Interface interface{}
+}
+
+// AddTo exports a field through the ObjectEncoder interface. It's primarily
+// useful to library authors, and shouldn't be necessary in most applications.
+func (f Field) AddTo(enc ObjectEncoder) {
+ var err error
+
+ switch f.Type {
+ case ArrayMarshalerType:
+ err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
+ case ObjectMarshalerType:
+ err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
+ case InlineMarshalerType:
+ err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc)
+ case BinaryType:
+ enc.AddBinary(f.Key, f.Interface.([]byte))
+ case BoolType:
+ enc.AddBool(f.Key, f.Integer == 1)
+ case ByteStringType:
+ enc.AddByteString(f.Key, f.Interface.([]byte))
+ case Complex128Type:
+ enc.AddComplex128(f.Key, f.Interface.(complex128))
+ case Complex64Type:
+ enc.AddComplex64(f.Key, f.Interface.(complex64))
+ case DurationType:
+ enc.AddDuration(f.Key, time.Duration(f.Integer))
+ case Float64Type:
+ enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
+ case Float32Type:
+ enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
+ case Int64Type:
+ enc.AddInt64(f.Key, f.Integer)
+ case Int32Type:
+ enc.AddInt32(f.Key, int32(f.Integer))
+ case Int16Type:
+ enc.AddInt16(f.Key, int16(f.Integer))
+ case Int8Type:
+ enc.AddInt8(f.Key, int8(f.Integer))
+ case StringType:
+ enc.AddString(f.Key, f.String)
+ case TimeType:
+ if f.Interface != nil {
+ enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
+ } else {
+ // Fall back to UTC if location is nil.
+ enc.AddTime(f.Key, time.Unix(0, f.Integer))
+ }
+ case TimeFullType:
+ enc.AddTime(f.Key, f.Interface.(time.Time))
+ case Uint64Type:
+ enc.AddUint64(f.Key, uint64(f.Integer))
+ case Uint32Type:
+ enc.AddUint32(f.Key, uint32(f.Integer))
+ case Uint16Type:
+ enc.AddUint16(f.Key, uint16(f.Integer))
+ case Uint8Type:
+ enc.AddUint8(f.Key, uint8(f.Integer))
+ case UintptrType:
+ enc.AddUintptr(f.Key, uintptr(f.Integer))
+ case ReflectType:
+ err = enc.AddReflected(f.Key, f.Interface)
+ case NamespaceType:
+ enc.OpenNamespace(f.Key)
+ case StringerType:
+ err = encodeStringer(f.Key, f.Interface, enc)
+ case ErrorType:
+ err = encodeError(f.Key, f.Interface.(error), enc)
+ case SkipType:
+ break
+ default:
+ panic(fmt.Sprintf("unknown field type: %v", f))
+ }
+
+ if err != nil {
+ enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+ }
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+ if f.Type != other.Type {
+ return false
+ }
+ if f.Key != other.Key {
+ return false
+ }
+
+ switch f.Type {
+ case BinaryType, ByteStringType:
+ return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+ case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+ return reflect.DeepEqual(f.Interface, other.Interface)
+ default:
+ return f == other
+ }
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+ for i := range fields {
+ fields[i].AddTo(enc)
+ }
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the String() method, similar to https://golang.org/src/fmt/print.go#L540
+ defer func() {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "<nil>")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", err)
+ }
+ }()
+
+ enc.AddString(key, stringer.(fmt.Stringer).String())
+ return nil
+}
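A sketch (illustrative, not vendored code) of the strongly-typed path through Field and ObjectEncoder: a type that implements zapcore.ObjectMarshaler is attached as a single field and marshaled via the Add* methods above, with no reflection at log time.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

type request struct {
	method string
	path   string
	bytes  int64
}

// MarshalLogObject writes the request through the ObjectEncoder interface.
func (r request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("method", r.method)
	enc.AddString("path", r.path)
	enc.AddInt64("bytes", r.bytes)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	logger.Info("handled", zap.Object("req", request{method: "GET", path: "/health", bytes: 42}))
}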
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000..198def9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+ Core
+ funcs []func(Entry) error
+}
+
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+ funcs := append([]func(Entry) error{}, hooks...)
+ return &hooked{
+ Core: core,
+ funcs: funcs,
+ }
+}
+
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ // Let the wrapped Core decide whether to log this message or not. This
+ // also gives the downstream a chance to register itself directly with the
+ // CheckedEntry.
+ if downstream := h.Core.Check(ent, ce); downstream != nil {
+ return downstream.AddCore(ent, h)
+ }
+ return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+ return &hooked{
+ Core: h.Core.With(fields),
+ funcs: h.funcs,
+ }
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+ // Since our downstream had a chance to register itself directly with the
+ // CheckedMessage, we don't need to call it here.
+ var err error
+ for i := range h.funcs {
+ err = multierr.Append(err, h.funcs[i](ent))
+ }
+ return err
+}
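RegisterHooks in practice, as a sketch: wrap an existing Core so every written entry also bumps a counter (the counter stands in for any metrics sink).

package main

import (
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var logged atomic.Int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// The hook runs synchronously for each entry the wrapped Core agrees to log.
	hooked := zapcore.RegisterHooks(base, func(zapcore.Entry) error {
		logged.Add(1)
		return nil
	})

	logger := zap.New(hooked)
	logger.Info("hello")
	logger.Info("world")
	_ = logged.Load() // 2
}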
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000..7a11237
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+ core Core
+ level LevelEnabler
+}
+
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core. It cannot be used to decrease the logging level, as it acts
+// as a filter before calling the underlying core. If the given level would lower
+// the effective log level, an error is returned.
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) {
+ for l := _maxLevel; l >= _minLevel; l-- {
+ if !core.Enabled(l) && level.Enabled(l) {
+ return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l)
+ }
+ }
+
+ return &levelFilterCore{core, level}, nil
+}
+
+func (c *levelFilterCore) Enabled(lvl Level) bool {
+ return c.level.Enabled(lvl)
+}
+
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
+func (c *levelFilterCore) With(fields []Field) Core {
+ return &levelFilterCore{c.core.With(fields), c.level}
+}
+
+func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !c.Enabled(ent.Level) {
+ return ce
+ }
+
+ return c.core.Check(ent, ce)
+}
+
+func (c *levelFilterCore) Write(ent Entry, fields []Field) error {
+ return c.core.Write(ent, fields)
+}
+
+func (c *levelFilterCore) Sync() error {
+ return c.core.Sync()
+}
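A sketch of NewIncreaseLevelCore: take a debug-level core and hand a warn-and-above view of it to a noisy subsystem (names are illustrative).

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	debugCore := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zapcore.DebugLevel,
	)

	// Raising the level is allowed; trying to lower it below the wrapped
	// core's level returns an error instead.
	warnOnly, err := zapcore.NewIncreaseLevelCore(debugCore, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}

	noisy := zap.New(warnOnly)
	noisy.Debug("filtered out")
	noisy.Warn("passes through")
}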
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 0000000..9685169
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,583 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/base64"
+ "math"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+// For JSON-escaping; see jsonEncoder.safeAddString below.
+const _hex = "0123456789abcdef"
+
+var _jsonPool = pool.New(func() *jsonEncoder {
+ return &jsonEncoder{}
+})
+
+func putJSONEncoder(enc *jsonEncoder) {
+ if enc.reflectBuf != nil {
+ enc.reflectBuf.Free()
+ }
+ enc.EncoderConfig = nil
+ enc.buf = nil
+ enc.spaced = false
+ enc.openNamespaces = 0
+ enc.reflectBuf = nil
+ enc.reflectEnc = nil
+ _jsonPool.Put(enc)
+}
+
+type jsonEncoder struct {
+ *EncoderConfig
+ buf *buffer.Buffer
+ spaced bool // include spaces after colons and commas
+ openNamespaces int
+
+ // for encoding generic values by reflection
+ reflectBuf *buffer.Buffer
+ reflectEnc ReflectedEncoder
+}
+
+// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
+// appropriately escapes all field keys and values.
+//
+// Note that the encoder doesn't deduplicate keys, so it's possible to produce
+// a message like
+//
+// {"foo":"bar","foo":"baz"}
+//
+// This is permitted by the JSON specification, but not encouraged. Many
+// libraries will ignore duplicate key-value pairs (typically keeping the last
+// pair) when unmarshaling, but users should attempt to avoid adding duplicate
+// keys.
+func NewJSONEncoder(cfg EncoderConfig) Encoder {
+ return newJSONEncoder(cfg, false)
+}
+
+func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
+ return &jsonEncoder{
+ EncoderConfig: &cfg,
+ buf: bufferpool.Get(),
+ spaced: spaced,
+ }
+}
+
+func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendArray(arr)
+}
+
+func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendObject(obj)
+}
+
+func (enc *jsonEncoder) AddBinary(key string, val []byte) {
+ enc.AddString(key, base64.StdEncoding.EncodeToString(val))
+}
+
+func (enc *jsonEncoder) AddByteString(key string, val []byte) {
+ enc.addKey(key)
+ enc.AppendByteString(val)
+}
+
+func (enc *jsonEncoder) AddBool(key string, val bool) {
+ enc.addKey(key)
+ enc.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
+ enc.addKey(key)
+ enc.AppendComplex128(val)
+}
+
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
+func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
+ enc.addKey(key)
+ enc.AppendDuration(val)
+}
+
+func (enc *jsonEncoder) AddFloat64(key string, val float64) {
+ enc.addKey(key)
+ enc.AppendFloat64(val)
+}
+
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
+func (enc *jsonEncoder) AddInt64(key string, val int64) {
+ enc.addKey(key)
+ enc.AppendInt64(val)
+}
+
+func (enc *jsonEncoder) resetReflectBuf() {
+ if enc.reflectBuf == nil {
+ enc.reflectBuf = bufferpool.Get()
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
+ } else {
+ enc.reflectBuf.Reset()
+ }
+}
+
+var nullLiteralBytes = []byte("null")
+
+// Only invoke the standard JSON encoder if there is actually something to
+// encode; otherwise write JSON null literal directly.
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
+ if obj == nil {
+ return nullLiteralBytes, nil
+ }
+ enc.resetReflectBuf()
+ if err := enc.reflectEnc.Encode(obj); err != nil {
+ return nil, err
+ }
+ enc.reflectBuf.TrimNewline()
+ return enc.reflectBuf.Bytes(), nil
+}
+
+func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
+ valueBytes, err := enc.encodeReflected(obj)
+ if err != nil {
+ return err
+ }
+ enc.addKey(key)
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) OpenNamespace(key string) {
+ enc.addKey(key)
+ enc.buf.AppendByte('{')
+ enc.openNamespaces++
+}
+
+func (enc *jsonEncoder) AddString(key, val string) {
+ enc.addKey(key)
+ enc.AppendString(val)
+}
+
+func (enc *jsonEncoder) AddTime(key string, val time.Time) {
+ enc.addKey(key)
+ enc.AppendTime(val)
+}
+
+func (enc *jsonEncoder) AddUint64(key string, val uint64) {
+ enc.addKey(key)
+ enc.AppendUint64(val)
+}
+
+func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('[')
+ err := arr.MarshalLogArray(enc)
+ enc.buf.AppendByte(']')
+ return err
+}
+
+func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
+ enc.addElementSeparator()
+ enc.buf.AppendByte('{')
+ err := obj.MarshalLogObject(enc)
+ enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
+ return err
+}
+
+func (enc *jsonEncoder) AppendBool(val bool) {
+ enc.addElementSeparator()
+ enc.buf.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AppendByteString(val []byte) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddByteString(val)
+ enc.buf.AppendByte('"')
+}
+
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
+ enc.addElementSeparator()
+ // Cast to a platform-independent, fixed-size type.
+ r, i := float64(real(val)), float64(imag(val))
+ enc.buf.AppendByte('"')
+ // Because we're always in a quoted string, we can use strconv without
+ // special-casing NaN and +/-Inf.
+ enc.buf.AppendFloat(r, precision)
+ // If imaginary part is less than 0, minus (-) sign is added by default
+ // by AppendFloat.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
+ enc.buf.AppendByte('i')
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendDuration(val time.Duration) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeDuration; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
+ // JSON valid.
+ enc.AppendInt64(int64(val))
+ }
+}
+
+func (enc *jsonEncoder) AppendInt64(val int64) {
+ enc.addElementSeparator()
+ enc.buf.AppendInt(val)
+}
+
+func (enc *jsonEncoder) AppendReflected(val interface{}) error {
+ valueBytes, err := enc.encodeReflected(val)
+ if err != nil {
+ return err
+ }
+ enc.addElementSeparator()
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) AppendString(val string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.buf.AppendTime(time, layout)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTime(val time.Time) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeTime; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
+ // output JSON valid.
+ enc.AppendInt64(val.UnixNano())
+ }
+}
+
+func (enc *jsonEncoder) AppendUint64(val uint64) {
+ enc.addElementSeparator()
+ enc.buf.AppendUint(val)
+}
+
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+
+func (enc *jsonEncoder) Clone() Encoder {
+ clone := enc.clone()
+ clone.buf.Write(enc.buf.Bytes())
+ return clone
+}
+
+func (enc *jsonEncoder) clone() *jsonEncoder {
+ clone := _jsonPool.Get()
+ clone.EncoderConfig = enc.EncoderConfig
+ clone.spaced = enc.spaced
+ clone.openNamespaces = enc.openNamespaces
+ clone.buf = bufferpool.Get()
+ return clone
+}
+
+func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ final := enc.clone()
+ final.buf.AppendByte('{')
+
+ if final.LevelKey != "" && final.EncodeLevel != nil {
+ final.addKey(final.LevelKey)
+ cur := final.buf.Len()
+ final.EncodeLevel(ent.Level, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
+ // output JSON valid.
+ final.AppendString(ent.Level.String())
+ }
+ }
+ if final.TimeKey != "" && !ent.Time.IsZero() {
+ final.AddTime(final.TimeKey, ent.Time)
+ }
+ if ent.LoggerName != "" && final.NameKey != "" {
+ final.addKey(final.NameKey)
+ cur := final.buf.Len()
+ nameEncoder := final.EncodeName
+
+ // if no name encoder provided, fall back to FullNameEncoder for backwards
+ // compatibility
+ if nameEncoder == nil {
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeName was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.LoggerName)
+ }
+ }
+ if ent.Caller.Defined {
+ if final.CallerKey != "" {
+ final.addKey(final.CallerKey)
+ cur := final.buf.Len()
+ final.EncodeCaller(ent.Caller, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeCaller was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.Caller.String())
+ }
+ }
+ if final.FunctionKey != "" {
+ final.addKey(final.FunctionKey)
+ final.AppendString(ent.Caller.Function)
+ }
+ }
+ if final.MessageKey != "" {
+ final.addKey(enc.MessageKey)
+ final.AppendString(ent.Message)
+ }
+ if enc.buf.Len() > 0 {
+ final.addElementSeparator()
+ final.buf.Write(enc.buf.Bytes())
+ }
+ addFields(final, fields)
+ final.closeOpenNamespaces()
+ if ent.Stack != "" && final.StacktraceKey != "" {
+ final.AddString(final.StacktraceKey, ent.Stack)
+ }
+ final.buf.AppendByte('}')
+ final.buf.AppendString(final.LineEnding)
+
+ ret := final.buf
+ putJSONEncoder(final)
+ return ret, nil
+}
+
+func (enc *jsonEncoder) truncate() {
+ enc.buf.Reset()
+}
+
+func (enc *jsonEncoder) closeOpenNamespaces() {
+ for i := 0; i < enc.openNamespaces; i++ {
+ enc.buf.AppendByte('}')
+ }
+ enc.openNamespaces = 0
+}
+
+func (enc *jsonEncoder) addKey(key string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(key)
+ enc.buf.AppendByte('"')
+ enc.buf.AppendByte(':')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+}
+
+func (enc *jsonEncoder) addElementSeparator() {
+ last := enc.buf.Len() - 1
+ if last < 0 {
+ return
+ }
+ switch enc.buf.Bytes()[last] {
+ case '{', '[', ':', ',', ' ':
+ return
+ default:
+ enc.buf.AppendByte(',')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+ }
+}
+
+func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
+ enc.addElementSeparator()
+ switch {
+ case math.IsNaN(val):
+ enc.buf.AppendString(`"NaN"`)
+ case math.IsInf(val, 1):
+ enc.buf.AppendString(`"+Inf"`)
+ case math.IsInf(val, -1):
+ enc.buf.AppendString(`"-Inf"`)
+ default:
+ enc.buf.AppendFloat(val, bitSize)
+ }
+}
+
+// safeAddString JSON-escapes a string and appends it to the internal buffer.
+// Unlike the standard library's encoder, it doesn't attempt to protect the
+// user from browser vulnerabilities or JSONP-related problems.
+func (enc *jsonEncoder) safeAddString(s string) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+ // appendTo appends this string-like object to the buffer.
+ appendTo func(*buffer.Buffer, S),
+ // decodeRune decodes the next rune from the string-like object
+ // and returns its value and width in bytes.
+ decodeRune func(S) (rune, int),
+ buf *buffer.Buffer,
+ s S,
+) {
+ // The encoding logic below works by skipping over characters
+ // that can be safely copied as-is,
+ // until a character is found that needs special handling.
+ // At that point, we copy everything we've seen so far,
+ // and then handle that special character.
+ //
+ // last is the index of the last byte that was copied to the buffer.
+ last := 0
+ for i := 0; i < len(s); {
+ if s[i] >= utf8.RuneSelf {
+ // Characters >= RuneSelf may be part of multi-byte runes.
+ // They need to be decoded before we can decide how to handle them.
+ r, size := decodeRune(s[i:])
+ if r != utf8.RuneError || size != 1 {
+ // No special handling required.
+ // Skip over this rune and continue.
+ i += size
+ continue
+ }
+
+ // Invalid UTF-8 sequence.
+ // Replace it with the Unicode replacement character.
+ appendTo(buf, s[last:i])
+ buf.AppendString(`\ufffd`)
+
+ i++
+ last = i
+ } else {
+ // Character < RuneSelf is a single-byte UTF-8 rune.
+ if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+ // No escaping necessary.
+ // Skip over this character and continue.
+ i++
+ continue
+ }
+
+ // This character needs to be escaped.
+ appendTo(buf, s[last:i])
+ switch s[i] {
+ case '\\', '"':
+ buf.AppendByte('\\')
+ buf.AppendByte(s[i])
+ case '\n':
+ buf.AppendByte('\\')
+ buf.AppendByte('n')
+ case '\r':
+ buf.AppendByte('\\')
+ buf.AppendByte('r')
+ case '\t':
+ buf.AppendByte('\\')
+ buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ buf.AppendString(`\u00`)
+ buf.AppendByte(_hex[s[i]>>4])
+ buf.AppendByte(_hex[s[i]&0xF])
+ }
+
+ i++
+ last = i
+ }
+ }
+
+ // add remaining
+ appendTo(buf, s[last:])
+}
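The escaping path above (quotes, backslashes, control bytes, and invalid UTF-8 replaced by \ufffd) is reachable through the package's exported constructor. A minimal sketch, assuming the vendored go.uber.org/zap and zapcore packages; the config keys, message, and field values are illustrative, not taken from this repository:

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
		MessageKey:  "msg",
		LevelKey:    "level",
		TimeKey:     "ts",
		EncodeLevel: zapcore.LowercaseLevelEncoder,
		EncodeTime:  zapcore.ISO8601TimeEncoder,
		LineEnding:  zapcore.DefaultLineEnding,
	})
	ent := zapcore.Entry{
		Level:   zapcore.InfoLevel,
		Time:    time.Now(),
		Message: "tab\there \"quoted\"",
	}
	buf, err := enc.EncodeEntry(ent, []zapcore.Field{zap.String("k", "v\n")})
	if err != nil {
		panic(err)
	}
	// Control characters and quotes arrive escaped (\t, \", \n) in valid JSON.
	fmt.Print(buf.String())
}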
diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go
new file mode 100644
index 0000000..05288d6
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
+}
+
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
+}
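A minimal sketch of how NewLazyWith defers the cost of Core.With until the wrapped core is first used; the core construction and the request_id field are illustrative, assuming the vendored zap and zapcore packages:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	// Fields are captured now but only encoded the first time the core is used.
	lazy := zapcore.NewLazyWith(core, []zapcore.Field{zap.String("request_id", "abc123")})
	zap.New(lazy).Info("lazy fields materialize here")
}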
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000..e01a241
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,229 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
+)
+
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
+// String returns a lower-case ASCII representation of the log level.
+func (l Level) String() string {
+ switch l {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warn"
+ case ErrorLevel:
+ return "error"
+ case DPanicLevel:
+ return "dpanic"
+ case PanicLevel:
+ return "panic"
+ case FatalLevel:
+ return "fatal"
+ default:
+ return fmt.Sprintf("Level(%d)", l)
+ }
+}
+
+// CapitalString returns an all-caps ASCII representation of the log level.
+func (l Level) CapitalString() string {
+ // Printing levels in all-caps is common enough that we should export this
+ // functionality.
+ switch l {
+ case DebugLevel:
+ return "DEBUG"
+ case InfoLevel:
+ return "INFO"
+ case WarnLevel:
+ return "WARN"
+ case ErrorLevel:
+ return "ERROR"
+ case DPanicLevel:
+ return "DPANIC"
+ case PanicLevel:
+ return "PANIC"
+ case FatalLevel:
+ return "FATAL"
+ default:
+ return fmt.Sprintf("LEVEL(%d)", l)
+ }
+}
+
+// MarshalText marshals the Level to text. Note that the text representation
+// drops the -Level suffix (see example).
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
+// expects the text representation of a Level to drop the -Level suffix (see
+// example).
+//
+// In particular, this makes it easy to configure logging levels using YAML,
+// TOML, or JSON files.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errUnmarshalNilLevel
+ }
+ if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
+ return fmt.Errorf("unrecognized level: %q", text)
+ }
+ return nil
+}
+
+func (l *Level) unmarshalText(text []byte) bool {
+ switch string(text) {
+ case "debug", "DEBUG":
+ *l = DebugLevel
+ case "info", "INFO", "": // make the zero value useful
+ *l = InfoLevel
+ case "warn", "WARN":
+ *l = WarnLevel
+ case "error", "ERROR":
+ *l = ErrorLevel
+ case "dpanic", "DPANIC":
+ *l = DPanicLevel
+ case "panic", "PANIC":
+ *l = PanicLevel
+ case "fatal", "FATAL":
+ *l = FatalLevel
+ default:
+ return false
+ }
+ return true
+}
+
+// Set sets the level for the flag.Value interface.
+func (l *Level) Set(s string) error {
+ return l.UnmarshalText([]byte(s))
+}
+
+// Get gets the level for the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Enabled returns true if the given level is at or above this level.
+func (l Level) Enabled(lvl Level) bool {
+ return lvl >= l
+}
+
+// LevelEnabler decides whether a given logging level is enabled when logging a
+// message.
+//
+// Enablers are intended to be used to implement deterministic filters;
+// concerns like sampling are better implemented as a Core.
+//
+// Each concrete Level value implements a static LevelEnabler which returns
+// true for itself and all higher logging levels. For example WarnLevel.Enabled()
+// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
+// FatalLevel, but return false for InfoLevel and DebugLevel.
+type LevelEnabler interface {
+ Enabled(Level) bool
+}
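ParseLevel, the flag.Value methods, and Enabled compose as below; a small sketch with illustrative values, assuming only the vendored zapcore package:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	lvl, err := zapcore.ParseLevel("WARN") // accepts lower-case or all-caps text
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl, lvl.CapitalString())        // warn WARN
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: error is at or above warn
	fmt.Println(lvl.Enabled(zapcore.DebugLevel)) // false: debug is below warn
}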
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
new file mode 100644
index 0000000..7af8dad
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level_strings.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/zap/internal/color"
+
+var (
+ _levelToColor = map[Level]color.Color{
+ DebugLevel: color.Magenta,
+ InfoLevel: color.Blue,
+ WarnLevel: color.Yellow,
+ ErrorLevel: color.Red,
+ DPanicLevel: color.Red,
+ PanicLevel: color.Red,
+ FatalLevel: color.Red,
+ }
+ _unknownLevelColor = color.Red
+
+ _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
+ _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
+)
+
+func init() {
+ for level, color := range _levelToColor {
+ _levelToLowercaseColorString[level] = color.Add(level.String())
+ _levelToCapitalColorString[level] = color.Add(level.CapitalString())
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
new file mode 100644
index 0000000..c3c55ba
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/marshaler.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// ObjectMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ObjectMarshaler is only used when zap.Object is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ObjectMarshaler interface {
+ MarshalLogObject(ObjectEncoder) error
+}
+
+// ObjectMarshalerFunc is a type adapter that turns a function into an
+// ObjectMarshaler.
+type ObjectMarshalerFunc func(ObjectEncoder) error
+
+// MarshalLogObject calls the underlying function.
+func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
+ return f(enc)
+}
+
+// ArrayMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ArrayMarshaler is only used when zap.Array is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ArrayMarshaler interface {
+ MarshalLogArray(ArrayEncoder) error
+}
+
+// ArrayMarshalerFunc is a type adapter that turns a function into an
+// ArrayMarshaler.
+type ArrayMarshalerFunc func(ArrayEncoder) error
+
+// MarshalLogArray calls the underlying function.
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
+ return f(enc)
+}
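A hedged sketch of the ObjectMarshaler contract described above: a hypothetical user type adds only its safe fields when logged through zap.Object. The type and field names are invented for illustration, assuming the vendored zap packages:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type that controls what reaches the log.
type user struct {
	Name     string
	Password string
}

// MarshalLogObject adds only the safe fields, omitting the password.
func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	logger.Info("login", zap.Object("user", user{Name: "ada", Password: "secret"}))
}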
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
new file mode 100644
index 0000000..dfead08
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// MapObjectEncoder is an ObjectEncoder backed by a simple
+// map[string]interface{}. It's not fast enough for production use, but it's
+// helpful in tests.
+type MapObjectEncoder struct {
+ // Fields contains the entire encoded log context.
+ Fields map[string]interface{}
+ // cur is a pointer to the namespace we're currently writing to.
+ cur map[string]interface{}
+}
+
+// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
+func NewMapObjectEncoder() *MapObjectEncoder {
+ m := make(map[string]interface{})
+ return &MapObjectEncoder{
+ Fields: m,
+ cur: m,
+ }
+}
+
+// AddArray implements ObjectEncoder.
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
+ arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
+ err := v.MarshalLogArray(arr)
+ m.cur[key] = arr.elems
+ return err
+}
+
+// AddObject implements ObjectEncoder.
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+ newMap := NewMapObjectEncoder()
+ m.cur[k] = newMap.Fields
+ return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+ m.cur[k] = v
+ return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+ ns := make(map[string]interface{})
+ m.cur[k] = ns
+ m.cur = ns
+}
+
+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.
+type sliceArrayEncoder struct {
+ elems []interface{}
+}
+
+func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
+ enc := &sliceArrayEncoder{}
+ err := v.MarshalLogArray(enc)
+ s.elems = append(s.elems, enc.elems)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
+ m := NewMapObjectEncoder()
+ err := v.MarshalLogObject(m)
+ s.elems = append(s.elems, m.Fields)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
+ s.elems = append(s.elems, v)
+ return nil
+}
+
+func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) }
+func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
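As the comment notes, MapObjectEncoder is aimed at tests rather than production; a short sketch of how a test might inspect fields and namespaces (the keys and values are illustrative):

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewMapObjectEncoder()
	enc.AddString("service", "laclm")
	enc.OpenNamespace("request")
	enc.AddInt("status", 200)
	// Keys added after OpenNamespace land in a nested map, not the top level.
	fmt.Println(enc.Fields) // map[request:map[status:200] service:laclm]
}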
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 0000000..8746360
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
new file mode 100644
index 0000000..b7c093a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -0,0 +1,229 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+const (
+ _numLevels = _maxLevel - _minLevel + 1
+ _countersPerLevel = 4096
+)
+
+type counter struct {
+ resetAt atomic.Int64
+ counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+ return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+ i := lvl - _minLevel
+ j := fnv32a(key) % _countersPerLevel
+ return &cs[i][j]
+}
+
+// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
+func fnv32a(s string) uint32 {
+ const (
+ offset32 = 2166136261
+ prime32 = 16777619
+ )
+ hash := uint32(offset32)
+ for i := 0; i < len(s); i++ {
+ hash ^= uint32(s[i])
+ hash *= prime32
+ }
+ return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+ tn := t.UnixNano()
+ resetAfter := c.resetAt.Load()
+ if resetAfter > tn {
+ return c.counter.Add(1)
+ }
+
+ c.counter.Store(1)
+
+ newResetAfter := tn + tick.Nanoseconds()
+ if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
+ // We raced with another goroutine trying to reset, and it also reset
+ // the counter to 1, so we need to reincrement the counter.
+ return c.counter.Add(1)
+ }
+
+ return 1
+}
+
+// SamplingDecision is a decision represented as a bit field made by sampler.
+// More decisions may be added in the future.
+type SamplingDecision uint32
+
+const (
+ // LogDropped indicates that the Sampler dropped a log entry.
+ LogDropped SamplingDecision = 1 << iota
+ // LogSampled indicates that the Sampler sampled a log entry.
+ LogSampled
+)
+
+// optionFunc wraps a func so it satisfies the SamplerOption interface.
+type optionFunc func(*sampler)
+
+func (f optionFunc) apply(s *sampler) {
+ f(s)
+}
+
+// SamplerOption configures a Sampler.
+type SamplerOption interface {
+ apply(*sampler)
+}
+
+// nopSamplingHook is the default hook used by sampler.
+func nopSamplingHook(Entry, SamplingDecision) {}
+
+// SamplerHook registers a function which will be called when Sampler makes a
+// decision.
+//
+// This hook may be used to get visibility into the performance of the sampler.
+// For example, use it to track metrics of dropped versus sampled logs.
+//
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
+func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
+ return optionFunc(func(s *sampler) {
+ s.hook = hook
+ })
+}
+
+// NewSamplerWithOptions creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
+// Sampler can be configured to report sampling decisions with the SamplerHook
+// option.
+//
+// Keep in mind that Zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
+ s := &sampler{
+ Core: core,
+ tick: tick,
+ counts: newCounters(),
+ first: uint64(first),
+ thereafter: uint64(thereafter),
+ hook: nopSamplingHook,
+ }
+ for _, opt := range opts {
+ opt.apply(s)
+ }
+
+ return s
+}
+
+type sampler struct {
+ Core
+
+ counts *counters
+ tick time.Duration
+ first, thereafter uint64
+ hook func(Entry, SamplingDecision)
+}
+
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
+// NewSampler creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+//
+// Deprecated: use NewSamplerWithOptions.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+ return NewSamplerWithOptions(core, tick, first, thereafter)
+}
+
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
+func (s *sampler) With(fields []Field) Core {
+ return &sampler{
+ Core: s.Core.With(fields),
+ tick: s.tick,
+ counts: s.counts,
+ first: s.first,
+ thereafter: s.thereafter,
+ hook: s.hook,
+ }
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !s.Enabled(ent.Level) {
+ return ce
+ }
+
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
+ }
+ return s.Core.Check(ent, ce)
+}
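A minimal sketch wiring NewSamplerWithOptions and SamplerHook together, roughly as the doc comments above describe; the tick, first/thereafter values, and counter name are illustrative, assuming the vendored zap and zapcore packages and Go's sync/atomic.Int64:

package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	// First 10 identical messages per second pass through, then every 100th.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1)
			}
		}),
	)
	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		logger.Info("hot path")
	}
	logger.Sugar().Infof("dropped %d entries", dropped.Load())
}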
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 0000000..9bb32f0
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type multiCore []Core
+
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
+// NewTee creates a Core that duplicates log entries into two or more
+// underlying Cores.
+//
+// Calling it with a single Core returns the input unchanged, and calling
+// it with no input returns a no-op Core.
+func NewTee(cores ...Core) Core {
+ switch len(cores) {
+ case 0:
+ return NewNopCore()
+ case 1:
+ return cores[0]
+ default:
+ return multiCore(cores)
+ }
+}
+
+func (mc multiCore) With(fields []Field) Core {
+ clone := make(multiCore, len(mc))
+ for i := range mc {
+ clone[i] = mc[i].With(fields)
+ }
+ return clone
+}
+
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
+func (mc multiCore) Enabled(lvl Level) bool {
+ for i := range mc {
+ if mc[i].Enabled(lvl) {
+ return true
+ }
+ }
+ return false
+}
+
+func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ for i := range mc {
+ ce = mc[i].Check(ent, ce)
+ }
+ return ce
+}
+
+func (mc multiCore) Write(ent Entry, fields []Field) error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Write(ent, fields))
+ }
+ return err
+}
+
+func (mc multiCore) Sync() error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Sync())
+ }
+ return err
+}
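A small sketch of NewTee splitting one logger across two sinks; the encoder choices and levels are illustrative, assuming the vendored zap and zapcore packages:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	tee := zapcore.NewTee(
		// Human-readable output on stdout for everything at Info and above.
		zapcore.NewCore(zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
			zapcore.Lock(os.Stdout), zapcore.InfoLevel),
		// JSON output on stderr only for errors.
		zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
			zapcore.Lock(os.Stderr), zapcore.ErrorLevel),
	)
	logger := zap.New(tee)
	logger.Info("written to stdout only")
	logger.Error("written to both sinks")
}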
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
new file mode 100644
index 0000000..d4a1af3
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "io"
+ "sync"
+
+ "go.uber.org/multierr"
+)
+
+// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
+// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer.
+type WriteSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
+// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
+// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
+func AddSync(w io.Writer) WriteSyncer {
+ switch w := w.(type) {
+ case WriteSyncer:
+ return w
+ default:
+ return writerWrapper{w}
+ }
+}
+
+type lockedWriteSyncer struct {
+ sync.Mutex
+ ws WriteSyncer
+}
+
+// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
+// particular, *os.Files must be locked before use.
+func Lock(ws WriteSyncer) WriteSyncer {
+ if _, ok := ws.(*lockedWriteSyncer); ok {
+ // no need to layer on another lock
+ return ws
+ }
+ return &lockedWriteSyncer{ws: ws}
+}
+
+func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
+ s.Lock()
+ n, err := s.ws.Write(bs)
+ s.Unlock()
+ return n, err
+}
+
+func (s *lockedWriteSyncer) Sync() error {
+ s.Lock()
+ err := s.ws.Sync()
+ s.Unlock()
+ return err
+}
+
+type writerWrapper struct {
+ io.Writer
+}
+
+func (w writerWrapper) Sync() error {
+ return nil
+}
+
+type multiWriteSyncer []WriteSyncer
+
+// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
+// and sync calls, much like io.MultiWriter.
+func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
+ if len(ws) == 1 {
+ return ws[0]
+ }
+ return multiWriteSyncer(ws)
+}
+
+// See https://golang.org/src/io/multi.go
+// When not all underlying syncers write the same number of bytes,
+// the smallest number is returned even though Write() is called on
+// all of them.
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+ var writeErr error
+ nWritten := 0
+ for _, w := range ws {
+ n, err := w.Write(p)
+ writeErr = multierr.Append(writeErr, err)
+ if nWritten == 0 && n != 0 {
+ nWritten = n
+ } else if n < nWritten {
+ nWritten = n
+ }
+ }
+ return nWritten, writeErr
+}
+
+func (ws multiWriteSyncer) Sync() error {
+ var err error
+ for _, w := range ws {
+ err = multierr.Append(err, w.Sync())
+ }
+ return err
+}
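A short sketch combining AddSync, Lock, and NewMultiWriteSyncer; the in-memory buffer destination is illustrative:

package main

import (
	"bytes"
	"fmt"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	var buf bytes.Buffer
	// AddSync gives the buffer a no-op Sync; Lock makes stdout safe for
	// concurrent use; the multi-syncer fans writes and syncs out to both.
	ws := zapcore.NewMultiWriteSyncer(
		zapcore.Lock(os.Stdout),
		zapcore.AddSync(&buf),
	)
	if _, err := ws.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	if err := ws.Sync(); err != nil {
		panic(err)
	}
	fmt.Printf("buffered copy: %q\n", buf.String())
}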
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go
new file mode 100644
index 0000000..7d9281e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/md4/md4.go
@@ -0,0 +1,122 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
+//
+// Deprecated: MD4 is cryptographically broken and should only be used
+// where compatibility with legacy systems, not security, is the goal. Instead,
+// use a secure hash like SHA-256 (from crypto/sha256).
+package md4
+
+import (
+ "crypto"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.MD4, New)
+}
+
+// The size of an MD4 checksum in bytes.
+const Size = 16
+
+// The blocksize of MD4 in bytes.
+const BlockSize = 64
+
+const (
+ _Chunk = 64
+ _Init0 = 0x67452301
+ _Init1 = 0xEFCDAB89
+ _Init2 = 0x98BADCFE
+ _Init3 = 0x10325476
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ s [4]uint32
+ x [_Chunk]byte
+ nx int
+ len uint64
+}
+
+func (d *digest) Reset() {
+ d.s[0] = _Init0
+ d.s[1] = _Init1
+ d.s[2] = _Init2
+ d.s[3] = _Init3
+ d.nx = 0
+ d.len = 0
+}
+
+// New returns a new hash.Hash computing the MD4 checksum.
+func New() hash.Hash {
+ d := new(digest)
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := len(p)
+ if n > _Chunk-d.nx {
+ n = _Chunk - d.nx
+ }
+ for i := 0; i < n; i++ {
+ d.x[d.nx+i] = p[i]
+ }
+ d.nx += n
+ if d.nx == _Chunk {
+ _Block(d, d.x[0:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ n := _Block(d, p)
+ p = p[n:]
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d0 *digest) Sum(in []byte) []byte {
+ // Make a copy of d0, so that caller can keep writing and summing.
+ d := new(digest)
+ *d = *d0
+
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ len := d.len
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if len%64 < 56 {
+ d.Write(tmp[0 : 56-len%64])
+ } else {
+ d.Write(tmp[0 : 64+56-len%64])
+ }
+
+ // Length in bits.
+ len <<= 3
+ for i := uint(0); i < 8; i++ {
+ tmp[i] = byte(len >> (8 * i))
+ }
+ d.Write(tmp[0:8])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ for _, s := range d.s {
+ in = append(in, byte(s>>0))
+ in = append(in, byte(s>>8))
+ in = append(in, byte(s>>16))
+ in = append(in, byte(s>>24))
+ }
+ return in
+}
diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go
new file mode 100644
index 0000000..5ea1ba9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/md4/md4block.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MD4 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package md4
+
+import "math/bits"
+
+var shift1 = []int{3, 7, 11, 19}
+var shift2 = []int{3, 5, 9, 13}
+var shift3 = []int{3, 9, 11, 15}
+
+var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15}
+var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}
+
+func _Block(dig *digest, p []byte) int {
+ a := dig.s[0]
+ b := dig.s[1]
+ c := dig.s[2]
+ d := dig.s[3]
+ n := 0
+ var X [16]uint32
+ for len(p) >= _Chunk {
+ aa, bb, cc, dd := a, b, c, d
+
+ j := 0
+ for i := 0; i < 16; i++ {
+ X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+ j += 4
+ }
+
+ // If this needs to be made faster in the future,
+ // the usual trick is to unroll each of these
+ // loops by a factor of 4; that lets you replace
+ // the shift[] lookups with constants and,
+ // with suitable variable renaming in each
+ // unrolled body, delete the a, b, c, d = d, a, b, c
+ // (or you can let the optimizer do the renaming).
+ //
+ // The index variables are uint so that % by a power
+ // of two can be optimized easily by a compiler.
+
+ // Round 1.
+ for i := uint(0); i < 16; i++ {
+ x := i
+ s := shift1[i%4]
+ f := ((c ^ d) & b) ^ d
+ a += f + X[x]
+ a = bits.RotateLeft32(a, s)
+ a, b, c, d = d, a, b, c
+ }
+
+ // Round 2.
+ for i := uint(0); i < 16; i++ {
+ x := xIndex2[i]
+ s := shift2[i%4]
+ g := (b & c) | (b & d) | (c & d)
+ a += g + X[x] + 0x5a827999
+ a = bits.RotateLeft32(a, s)
+ a, b, c, d = d, a, b, c
+ }
+
+ // Round 3.
+ for i := uint(0); i < 16; i++ {
+ x := xIndex3[i]
+ s := shift3[i%4]
+ h := b ^ c ^ d
+ a += h + X[x] + 0x6ed9eba1
+ a = bits.RotateLeft32(a, s)
+ a, b, c, d = d, a, b, c
+ }
+
+ a += aa
+ b += bb
+ c += cc
+ d += dd
+
+ p = p[_Chunk:]
+ n += _Chunk
+ }
+
+ dig.s[0] = a
+ dig.s[1] = b
+ dig.s[2] = c
+ dig.s[3] = d
+ return n
+}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..28cd99c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := prf.Size()
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := 1; block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint(i))
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[len(dk)-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := 2; n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
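A minimal sketch of deriving an AES-256-sized key with the Key function above; the password, salt size, and iteration count are illustrative (tune the count to your hardware budget), assuming crypto/sha256 for the PRF:

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// 32-byte key suitable for AES-256.
	key := pbkdf2.Key([]byte("correct horse battery staple"), salt, 600_000, 32, sha256.New)
	fmt.Println(hex.EncodeToString(key))
}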
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go
new file mode 100644
index 0000000..e6cd0ce
--- /dev/null
+++ b/vendor/golang.org/x/net/http/httpguts/guts.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httpguts provides functions implementing various details
+// of the HTTP specification.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httpguts
+
+import (
+ "net/textproto"
+ "strings"
+)
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See RFC 7230, Section 4.1.2
+func ValidTrailerHeader(name string) bool {
+ name = textproto.CanonicalMIMEHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
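A tiny sketch of ValidTrailerHeader on a few representative header names (the names themselves are illustrative):

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Framing and hop-by-hop headers may not appear in trailers.
	fmt.Println(httpguts.ValidTrailerHeader("Content-Length")) // false
	fmt.Println(httpguts.ValidTrailerHeader("If-Match"))       // false (any If-* is rejected)
	fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))     // true
}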
diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go
new file mode 100644
index 0000000..9b4de94
--- /dev/null
+++ b/vendor/golang.org/x/net/http/httpguts/httplex.go
@@ -0,0 +1,347 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpguts
+
+import (
+ "net"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+var isTokenTable = [256]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ return r < utf8.RuneSelf && isTokenTable[byte(r)]
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
+ if tokenEqual(trimOWS(v[:comma]), token) {
+ return true
+ }
+ v = v[comma+1:]
+ }
+ return tokenEqual(trimOWS(v), token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+//
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+//
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for i := 0; i < len(v); i++ {
+ if !isTokenTable[v[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+//
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+ if isASCII(v) {
+ return v, nil
+ }
+
+ host, port, err := net.SplitHostPort(v)
+ if err != nil {
+ // The input 'v' argument was just a "host" argument,
+ // without a port. This error should not be returned
+ // to the caller.
+ host = v
+ port = ""
+ }
+ host, err = idna.ToASCII(host)
+ if err != nil {
+ // Non-UTF-8? Not representable in Punycode, in any
+ // case.
+ return "", err
+ }
+ if port == "" {
+ return host, nil
+ }
+ return net.JoinHostPort(host, port), nil
+}
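Taken together, the exported helpers above cover the common validation paths: token-valid names, CTL-free values, ASCII-case-insensitive token matching, and IDNA host conversion. A minimal usage sketch (a hypothetical standalone program, not part of the vendored file):

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Header names must be RFC 7230 tokens; a space is not a tchar.
	fmt.Println(httpguts.ValidHeaderFieldName("X-Request-Id")) // true
	fmt.Println(httpguts.ValidHeaderFieldName("Bad Name"))     // false

	// Values may contain anything except control bytes (other than SP/HTAB).
	fmt.Println(httpguts.ValidHeaderFieldValue("ok\x00")) // false, NUL is a CTL

	// Token matching is comma-aware and ASCII case-insensitive.
	fmt.Println(httpguts.HeaderValuesContainsToken([]string{"gzip, chunked"}, "CHUNKED")) // true

	// Non-ASCII hosts are converted to their Punycode form, keeping the port.
	host, err := httpguts.PunycodeHostPort("bücher.example:443")
	fmt.Println(host, err) // xn--bcher-kva.example:443 <nil>
}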
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore
new file mode 100644
index 0000000..190f122
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/.gitignore
@@ -0,0 +1,2 @@
+*~
+h2i/h2i
diff --git a/vendor/golang.org/x/net/http2/ascii.go b/vendor/golang.org/x/net/http2/ascii.go
new file mode 100644
index 0000000..17caa20
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if lower(s[i]) != lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+ if !isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
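Keeping these helpers ASCII-only matters because Go's Unicode-aware folding accepts equivalences HTTP never intended; for instance, strings.EqualFold matches the Kelvin sign against a plain "k". A small illustration (hypothetical snippet, separate from the vendored file):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Simple Unicode case folding treats U+212A KELVIN SIGN as equal to "k",
	// which is exactly the surprise the ASCII-only helpers above avoid.
	fmt.Println(strings.EqualFold("k", "\u212A")) // true
	fmt.Println("\u212A" == "K")                  // false: different bytes entirely
}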
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go
new file mode 100644
index 0000000..c9a0cf3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ciphers.go
@@ -0,0 +1,641 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+// A list of the possible cipher suite ids. Taken from
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+// References:
+// https://tools.ietf.org/html/rfc7540#appendix-A
+// Reject cipher suites from Appendix A.
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type"
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case cipher_TLS_NULL_WITH_NULL_NULL,
+ cipher_TLS_RSA_WITH_NULL_MD5,
+ cipher_TLS_RSA_WITH_NULL_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_PSK_WITH_NULL_SHA,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_NULL_SHA256,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_NULL_SHA256,
+ cipher_TLS_PSK_WITH_NULL_SHA384,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_AES_128_CCM,
+ cipher_TLS_RSA_WITH_AES_256_CCM,
+ cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ cipher_TLS_PSK_WITH_AES_128_CCM,
+ cipher_TLS_PSK_WITH_AES_256_CCM,
+ cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..e81b73e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,311 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "context"
+ "errors"
+ "net"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ // GetClientConn returns a specific HTTP/2 connection (usually
+ // a TLS-TCP connection) to an HTTP/2 server. On success, the
+ // returned ClientConn accounts for the upcoming RoundTrip
+ // call, so the caller should not omit it. If the caller needs
+ // to, ClientConn.RoundTrip can be called with a bogus
+ // new(http.Request) to release the stream reservation.
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
+ if isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ traceGetConn(req, addr)
+ const singleUse = true
+ cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ for {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.ReserveNewRequest() {
+ // When a connection is presented to us by the net/http package,
+ // the GetConn hook has already been called.
+ // Don't call it a second time here.
+ if !cc.getConnCalled {
+ traceGetConn(req, addr)
+ }
+ cc.getConnCalled = false
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ traceGetConn(req, addr)
+ call := p.getStartDialLocked(req.Context(), addr)
+ p.mu.Unlock()
+ <-call.done
+ if shouldRetryDial(call, req) {
+ continue
+ }
+ cc, err := call.res, call.err
+ if err != nil {
+ return nil, err
+ }
+ if cc.ReserveNewRequest() {
+ return cc, nil
+ }
+ }
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ _ incomparable
+ p *clientConnPool
+ // the context associated with the request
+ // that created this dialCall
+ ctx context.Context
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(call.ctx, addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(ctx context.Context, addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+
+ close(c.done)
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The used return value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ _ incomparable
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, nc net.Conn) {
+ cc, err := t.NewClientConn(nc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ cc.getConnCalled = true // already called by the net/http package
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item to prevent
+ // the GC from seeing it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
+
+// shouldRetryDial reports whether the current request should
+// retry dialing after the call finished unsuccessfully, for example
+// if the dial was canceled because of a context cancellation or
+// deadline expiry.
+func shouldRetryDial(call *dialCall, req *http.Request) bool {
+ if call.err == nil {
+ // No error, no need to retry
+ return false
+ }
+ if call.ctx == req.Context() {
+ // If the call has the same context as the request, the dial
+ // should not be retried, since any cancellation will have come
+ // from this request.
+ return false
+ }
+ if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
+ // If the call error is not because of a context cancellation or a deadline expiry,
+ // the dial should not be retried.
+ return false
+ }
+ // Only retry if the error is a context cancellation error or deadline expiry
+ // and the context associated with the call was canceled or expired.
+ return call.ctx.Err() != nil
+}
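The exported pieces here are the ClientConnPool interface and (elsewhere in the package) Transport.NewClientConn; a custom pool can be plugged in through the Transport's ConnPool field. A deliberately naive sketch that dials a fresh connection per request, with a hypothetical type name and placeholder URL (it assumes the ConnPool hook behaves as described above and does no connection reuse):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

// onePerRequestPool is a toy ClientConnPool: every GetClientConn dials a new
// TLS connection, wraps it with Transport.NewClientConn, and never reuses it.
type onePerRequestPool struct {
	t *http2.Transport
}

func (p *onePerRequestPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
	conn, err := tls.Dial("tcp", addr, &tls.Config{NextProtos: []string{"h2"}})
	if err != nil {
		return nil, err
	}
	cc, err := p.t.NewClientConn(conn)
	if err != nil {
		return nil, err
	}
	cc.ReserveNewRequest() // account for the upcoming RoundTrip, per the interface contract
	return cc, nil
}

func (p *onePerRequestPool) MarkDead(*http2.ClientConn) {} // nothing cached, nothing to forget

func main() {
	t := &http2.Transport{}
	t.ConnPool = &onePerRequestPool{t: t}
	resp, err := (&http.Client{Transport: t}).Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}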
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
new file mode 100644
index 0000000..ca645d9
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "math"
+ "net/http"
+ "time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTP2Config documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+// - Otherwise use the http2.{Server,Transport} value.
+// - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+ MaxConcurrentStreams uint32
+ MaxDecoderHeaderTableSize uint32
+ MaxEncoderHeaderTableSize uint32
+ MaxReadFrameSize uint32
+ MaxUploadBufferPerConnection int32
+ MaxUploadBufferPerStream int32
+ SendPingTimeout time.Duration
+ PingTimeout time.Duration
+ WriteByteTimeout time.Duration
+ PermitProhibitedCipherSuites bool
+ CountError func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+ conf := http2Config{
+ MaxConcurrentStreams: h2.MaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+ MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+ CountError: h2.CountError,
+ }
+ fillNetHTTPServerConfig(&conf, h1)
+ setConfigDefaults(&conf, true)
+ return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+ conf := http2Config{
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ }
+
+ // Unlike most config fields, where out-of-range values revert to the default,
+ // Transport.MaxReadFrameSize clips.
+ if conf.MaxReadFrameSize < minMaxFrameSize {
+ conf.MaxReadFrameSize = minMaxFrameSize
+ } else if conf.MaxReadFrameSize > maxFrameSize {
+ conf.MaxReadFrameSize = maxFrameSize
+ }
+
+ if h2.t1 != nil {
+ fillNetHTTPTransportConfig(&conf, h2.t1)
+ }
+ setConfigDefaults(&conf, false)
+ return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+ if *v < minval || *v > maxval {
+ *v = defval
+ }
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+ setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+ setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ if server {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+ }
+ if server {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+ }
+ setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+ setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return n + typicalHeaders*perFieldOverhead
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 0000000..5b516c5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+ fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+ fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
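`fillNetHTTPConfig` copies only the fields the caller actually set (non-zero values, non-nil `CountError`), so untouched knobs keep the http2 package defaults. A hedged usage sketch of the Go 1.24 `net/http` fields this file reads; the address, certificate paths, and limit values are placeholders:

```go
//go:build go1.24

// Sketch of setting the net/http knobs that fillNetHTTPConfig consumes.
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams:          250,
			MaxReadFrameSize:              1 << 20,
			MaxReceiveBufferPerConnection: 2 << 20,
			PingTimeout:                   15 * time.Second,
		},
	}
	// Fields left at their zero value are ignored by fillNetHTTPConfig,
	// so the http2 package falls back to its own defaults for them.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```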
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 0000000..060fd6c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
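The pair of config files above is selected by build constraints: `//go:build go1.24` compiles the real field copy, `//go:build !go1.24` compiles empty stubs so the package still builds on older toolchains. A minimal sketch of the same pattern as two hypothetical files of one package (names are illustrative):

```go
// probe_go124.go — compiled only with Go 1.24 or newer.

//go:build go1.24

package probe

// hasHTTP2Field reports whether the newer net/http config fields are available.
func hasHTTP2Field() bool { return true }
```

```go
// probe_pre_go124.go — fallback stub for older toolchains.

//go:build !go1.24

package probe

func hasHTTP2Field() bool { return false }
```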
diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go
new file mode 100644
index 0000000..e6f55cb
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/databuffer.go
@@ -0,0 +1,149 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return new([1 << 10]byte) }},
+ {New: func() interface{} { return new([2 << 10]byte) }},
+ {New: func() interface{} { return new([4 << 10]byte) }},
+ {New: func() interface{} { return new([8 << 10]byte) }},
+ {New: func() interface{} { return new([16 << 10]byte) }},
+}
+
+func getDataBufferChunk(size int64) []byte {
+ switch {
+ case size <= 1<<10:
+ return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+ case size <= 2<<10:
+ return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+ case size <= 4<<10:
+ return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+ case size <= 8<<10:
+ return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+ default:
+ return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
+ }
+}
+
+func putDataBufferChunk(p []byte) {
+ switch len(p) {
+ case 1 << 10:
+ dataChunkPools[0].Put((*[1 << 10]byte)(p))
+ case 2 << 10:
+ dataChunkPools[1].Put((*[2 << 10]byte)(p))
+ case 4 << 10:
+ dataChunkPools[2].Put((*[4 << 10]byte)(p))
+ case 8 << 10:
+ dataChunkPools[3].Put((*[8 << 10]byte)(p))
+ case 16 << 10:
+ dataChunkPools[4].Put((*[16 << 10]byte)(p))
+ default:
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+ }
+}
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+ // If the last chunk is empty, allocate a new chunk. Try to allocate
+ // enough to fully copy p plus any additional bytes we expect to
+ // receive. However, this may allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
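`getDataBufferChunk` rounds a request up to the smallest pooled size class and `putDataBufferChunk` returns the backing array to the matching pool, so per-stream buffers grow chunk by chunk instead of as one large allocation. A self-contained sketch of that size-class pooling (identifiers are local to the example, not the package's API):

```go
// Illustrative size-class pooling in the style of the chunk helpers above.
package main

import (
	"fmt"
	"sync"
)

var pools = [...]sync.Pool{
	{New: func() any { return new([1 << 10]byte) }},
	{New: func() any { return new([4 << 10]byte) }},
	{New: func() any { return new([16 << 10]byte) }},
}

// getChunk returns a slice backed by the smallest pooled array that fits size.
func getChunk(size int) []byte {
	switch {
	case size <= 1<<10:
		return pools[0].Get().(*[1 << 10]byte)[:]
	case size <= 4<<10:
		return pools[1].Get().(*[4 << 10]byte)[:]
	default:
		return pools[2].Get().(*[16 << 10]byte)[:]
	}
}

// putChunk hands the slice's backing array back to the matching pool.
func putChunk(p []byte) {
	switch len(p) {
	case 1 << 10:
		pools[0].Put((*[1 << 10]byte)(p))
	case 4 << 10:
		pools[1].Put((*[4 << 10]byte)(p))
	case 16 << 10:
		pools[2].Put((*[16 << 10]byte)(p))
	}
}

func main() {
	c := getChunk(3000) // a 3 KB request lands in the 4 KiB class
	fmt.Println(len(c)) // 4096
	putChunk(c)
}
```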
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..f2067da
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+func (e ErrCode) stringToken() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+ Cause error // optional additional detail
+}
+
+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var errFromPeer = errors.New("received from peer")
+
+func streamError(id uint32, code ErrCode) StreamError {
+ return StreamError{StreamID: id, Code: code}
+}
+
+func (e StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
+type connError struct {
+ Code ErrCode // the ConnectionError error code
+ Reason string // additional reason
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value for %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
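`ConnectionError`, `StreamError`, and `ErrCode` are exported, so callers can tell connection-fatal errors apart from per-stream ones. A small usage sketch, assuming the `golang.org/x/net/http2` import; the error values are contrived:

```go
// Sketch: distinguishing connection-level from stream-level HTTP/2 errors.
package main

import (
	"errors"
	"fmt"

	"golang.org/x/net/http2"
)

func classify(err error) string {
	var se http2.StreamError
	if errors.As(err, &se) {
		return fmt.Sprintf("stream %d failed: %v", se.StreamID, se.Code)
	}
	var ce http2.ConnectionError
	if errors.As(err, &ce) {
		return fmt.Sprintf("connection failed: %v", http2.ErrCode(ce))
	}
	return "other: " + err.Error()
}

func main() {
	fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}))
	fmt.Println(classify(http2.ConnectionError(http2.ErrCodeFlowControl)))
}
```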
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..b7dbd18
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const inflowMinRefresh = 4 << 10
+
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type inflow struct {
+ avail int32
+ unsent int32
+}
+
+// init sets the initial window.
+func (f *inflow) init(n int32) {
+ f.avail = n
+}
+
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *inflow) add(n int) (connAdd int32) {
+ if n < 0 {
+ panic("negative update")
+ }
+ unsent := int64(f.unsent) + int64(n)
+ // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+ // RFC 7540 Section 6.9.1.
+ const maxWindow = 1<<31 - 1
+ if unsent+int64(f.avail) > maxWindow {
+ panic("flow control update exceeds maximum window size")
+ }
+ f.unsent = int32(unsent)
+ if f.unsent < inflowMinRefresh && f.unsent < f.avail {
+ // If there aren't at least inflowMinRefresh bytes of window to send,
+ // and this update won't at least double the window, buffer the update for later.
+ return 0
+ }
+ f.avail += f.unsent
+ f.unsent = 0
+ return int32(unsent)
+}
+
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *inflow) take(n uint32) bool {
+ if n > uint32(f.avail) {
+ return false
+ }
+ f.avail -= int32(n)
+ return true
+}
+
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func takeInflows(f1, f2 *inflow, n uint32) bool {
+ if n > uint32(f1.avail) || n > uint32(f2.avail) {
+ return false
+ }
+ f1.avail -= int32(n)
+ f2.avail -= int32(n)
+ return true
+}
+
+// outflow is the outbound flow control window's size.
+type outflow struct {
+ _ incomparable
+
+ // n is the number of DATA bytes we're allowed to send.
+ // An outflow is kept both on a conn and a per-stream.
+ n int32
+
+ // conn points to the shared connection-level outflow that is
+ // shared by all streams on that conn. It is nil for the outflow
+ // that's on the conn directly.
+ conn *outflow
+}
+
+func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }
+
+func (f *outflow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *outflow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *outflow) add(n int32) bool {
+ sum := f.n + n
+ if (sum > n) == (f.n > 0) {
+ f.n = sum
+ return true
+ }
+ return false
+}
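`inflow.add` batches window credit: it returns 0 until the unsent amount reaches `inflowMinRefresh` (4 KiB) or would at least double the advertised window, which keeps WINDOW_UPDATE frames infrequent. A standalone sketch that restates just that accumulation rule (not the package's API):

```go
// Standalone restatement of the inflow batching rule above.
package main

import "fmt"

const minRefresh = 4 << 10 // same threshold as inflowMinRefresh

type window struct {
	avail  int32 // window the peer currently believes it has
	unsent int32 // bytes consumed locally but not yet advertised back
}

// release frees n bytes of buffered data and reports how large a
// WINDOW_UPDATE (if any) should be sent to the peer right now.
func (w *window) release(n int32) int32 {
	w.unsent += n
	if w.unsent < minRefresh && w.unsent < w.avail {
		return 0 // too small to be worth a frame; keep accumulating
	}
	update := w.unsent
	w.avail += w.unsent
	w.unsent = 0
	return update
}

func main() {
	w := &window{avail: 64 << 10}
	fmt.Println(w.release(1024)) // 0: batched
	fmt.Println(w.release(3072)) // 4096: crossed the 4 KiB threshold
}
```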
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..97bd8b0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1702 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See https://httpwg.org/specs/rfc7540.html#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if f.logWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+ dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+ if fc == nil {
+ return &DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ countError: func(string) {},
+ logReads: logFrameReads,
+ logWrites: logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ }
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ }
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_data_stream_0")
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := fc.getDataFrame()
+ f.FrameHeader = fh
+
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ countError("frame_data_pad_byte_short")
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ countError("frame_data_pad_too_big")
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+ errPadLength = errors.New("pad length too large")
+ errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
+ return err
+ }
+ return f.endWrite()
+}
+
+// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
+// The caller should call endWrite to flush the frame to the underlying writer.
+func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return errPadBytes
+ }
+ }
+ }
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ if pad != nil {
+ flags |= FlagDataPadded
+ }
+ f.startWrite(FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
+ f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
+ return nil
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See https://httpwg.org/specs/rfc7540.html#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ countError("frame_settings_ack_with_length")
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ countError("frame_settings_has_stream")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ countError("frame_settings_mod_6")
+ // Expecting a whole number of 6-byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ countError("frame_settings_window_size_too_big")
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if s := f.Setting(i); s.ID == id {
+ return s.Val, true
+ }
+ }
+ return 0, false
+}
+
+// Setting returns the setting from the frame at the given 0-based index.
+// The index must be >= 0 and less than f.NumSettings().
+func (f *SettingsFrame) Setting(i int) Setting {
+ buf := f.p
+ return Setting{
+ ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
+ Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
+ }
+}
+
+func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }
+
+// HasDuplicates reports whether f contains any duplicate setting IDs.
+func (f *SettingsFrame) HasDuplicates() bool {
+ num := f.NumSettings()
+ if num == 0 {
+ return false
+ }
+ // If it's small enough (the common case), just do the n^2
+ // thing and avoid a map allocation.
+ if num < 10 {
+ for i := 0; i < num; i++ {
+ idi := f.Setting(i).ID
+ for j := i + 1; j < num; j++ {
+ idj := f.Setting(j).ID
+ if idi == idj {
+ return true
+ }
+ }
+ }
+ return false
+ }
+ seen := map[SettingID]bool{}
+ for i := 0; i < num; i++ {
+ id := f.Setting(i).ID
+ if seen[id] {
+ return true
+ }
+ seen[id] = true
+ }
+ return false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if err := fn(f.Setting(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ countError("frame_ping_length")
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ countError("frame_ping_has_stream")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ countError("frame_goaway_has_stream")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ countError("frame_goaway_short")
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ if len(p) != 4 {
+ countError("frame_windowupdate_bad_len")
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with an flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ countError("frame_windowupdate_zero_inc_conn")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ countError("frame_windowupdate_zero_inc_stream")
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_headers_zero_stream")
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ countError("frame_headers_pad_short")
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ countError("frame_headers_prio_short")
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ countError("frame_headers_prio_weight_short")
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) < 0 {
+ countError("frame_headers_pad_too_big")
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADER frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_priority_zero_stream")
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ countError("frame_priority_bad_length")
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ if len(p) != 4 {
+ countError("frame_rststream_bad_len")
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ countError("frame_rststream_zero_stream")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_continuation_zero_stream")
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ countError("frame_pushpromise_zero_stream")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ countError("frame_pushpromise_pad_short")
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ countError("frame_pushpromise_promiseid_short")
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ countError("frame_pushpromise_pad_too_big")
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID of the stream
+ // that this PUSH_PROMISE frame promises.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.Pseudo accessor
+ // method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority", ":protocol":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 5.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := int(fr.maxHeaderListSize())
+ if v < 0 {
+ // If maxHeaderListSize overflows an int, use no limit (0).
+ return 0
+ }
+ return v
+}
+
+// readMetaFrame reads 0 or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
+ }
+ if !httpguts.ValidHeaderFieldValue(hf.Value) {
+ // Don't include the value in the error, because it may be sensitive.
+ invalid = headerFieldValueError(hf.Name)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ remainSize = 0
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+
+ // Avoid parsing large amounts of headers that we will then discard.
+ // If the sender exceeds the max header list size by too much,
+ // skip parsing the fragment and close the connection.
+ //
+ // "Too much" is either any CONTINUATION frame after we've already
+ // exceeded the max header list size (in which case remainSize is 0),
+ // or a frame whose encoded size is more than twice the remaining
+ // header list bytes we're willing to accept.
+ if int64(len(frag)) > int64(2*remainSize) {
+ if VerboseLogs {
+ log.Printf("http2: header list too large")
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return mh, ConnectionError(ErrCodeProtocol)
+ }
+
+ // Also close the connection after any CONTINUATION frame following an
+ // invalid header, since we stop tracking the size of the headers after
+ // an invalid one.
+ if invalid != nil {
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return mh, ConnectionError(ErrCodeProtocol)
+ }
+
+ if _, err := hdec.Write(frag); err != nil {
+ return mh, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return mh, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
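For orientation, a self-contained sketch (not part of the vendored file) of the ReadMetaHeaders path implemented above, using an in-memory pipe so it runs on its own:

package main

import (
	"bytes"
	"fmt"
	"net"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	client, server := net.Pipe()

	// Writer side: encode a small header block and send it as one HEADERS frame.
	go func() {
		var hb bytes.Buffer
		enc := hpack.NewEncoder(&hb)
		enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
		enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/hello"})
		enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
		enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
		fr := http2.NewFramer(client, client)
		fr.WriteHeaders(http2.HeadersFrameParam{
			StreamID:      1,
			BlockFragment: hb.Bytes(),
			EndHeaders:    true,
		})
	}()

	// Reader side: with ReadMetaHeaders set, ReadFrame returns *MetaHeadersFrame
	// for HEADERS+CONTINUATION sequences.
	fr := http2.NewFramer(server, server)
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println(mh.PseudoValue("method"), mh.PseudoValue("path"), mh.RegularFields())
}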
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
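These checks are disabled unless DEBUG_HTTP2_GOROUTINES=1 is set. A rough, package-internal sketch of the intended pattern; the loopState type and its methods are hypothetical, not part of the vendored file:

// loopState is a hypothetical holder for state owned by a single goroutine.
type loopState struct {
	serveG goroutineLock // created on the goroutine that owns the state
}

func newLoopState() *loopState {
	return &loopState{serveG: newGoroutineLock()}
}

func (s *loopState) mutate() {
	// Panics (only when debugging is enabled) if called from any other goroutine.
	s.serveG.check()
	// ... modify state that only the owning goroutine may touch ...
}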
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..46219da
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,245 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This will protect the encoder from too large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.table.init()
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ i, nameValueMatch = staticTable.search(f)
+ if nameValueMatch {
+ return i, true
+ }
+
+ j, nameValueMatch := e.dynTab.table.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ return j + uint64(staticTable.len()), nameValueMatch
+ }
+
+ return i, false
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// MaxDynamicTableSize returns the current dynamic header table size.
+func (e *Encoder) MaxDynamicTableSize() (v uint32) {
+ return e.dynTab.maxSize
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same as the default dynamic header table
+// size described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// https://httpwg.org/specs/rfc7541.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
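A minimal, self-contained sketch of driving the Encoder above; the header values are illustrative:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	// Sensitive fields use the "Never Indexed" representation and are kept
	// out of the dynamic table.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
	fmt.Printf("%d-byte header block: % x\n", buf.Len(), buf.Bytes())
}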
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..7a1d976
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,523 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+ // Name and/or Value by hand, but we don't care, because that
+ // won't happen on the wire because the encoding doesn't allow
+ // it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ emitEnabled bool // whether calls to emit are enabled
+ maxStrLen int // 0 means unlimited
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+
+ firstField bool // processing the first field of the header block
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ firstField: true,
+ }
+ d.dynTab.table.init()
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+ d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2
+ table headerFieldTable
+ size uint32 // in bytes
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.table.addEntry(f)
+ dt.size += f.Size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff.
+func (dt *dynamicTable) evict() {
+ var n int
+ for dt.size > dt.maxSize && n < dt.table.len() {
+ dt.size -= dt.table.ents[n].Size()
+ n++
+ }
+ dt.table.evictOldest(n)
+}
+
+func (d *Decoder) maxTableIndex() int {
+ // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
+ // the dynamic table to 2^32 bytes, where each entry will occupy more than
+ // one byte. Further, the staticTable has a fixed, small length.
+ return d.dynTab.table.len() + staticTable.len()
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ // See Section 2.3.3.
+ if i == 0 {
+ return
+ }
+ if i <= uint64(staticTable.len()) {
+ return staticTable.ents[i-1], true
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ // In the dynamic table, newer entries have lower indices.
+ // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+ // the reversed dynamic table.
+ dt := d.dynTab.table
+ return dt.ents[dt.len()-(int(i)-staticTable.len())], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
+
+// Close declares that the decoding is complete and resets the Decoder
+// to be reused again for a new header block. If there is any remaining
+// data in the decoder's buffer, Close returns an error.
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ d.firstField = true
+ return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ d.firstField = false
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
+ var undecodedName undecodedString
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ undecodedName, buf, err = d.readString(buf)
+ if err != nil {
+ return err
+ }
+ }
+ undecodedValue, buf, err := d.readString(buf)
+ if err != nil {
+ return err
+ }
+ if wantStr {
+ if nameIdx <= 0 {
+ hf.Name, err = d.decodeString(undecodedName)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, err = d.decodeString(undecodedValue)
+ if err != nil {
+ return err
+ }
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
+ // beginning of the first header block following the change to the dynamic table size.
+ if !d.firstField && d.dynTab.size > 0 {
+ return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
+ }
+
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
+
+// readString reads an hpack string from p.
+//
+// It returns a reference to the encoded string data to permit deferring decode costs
+// until after the caller verifies all data is present.
+func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
+ if len(p) == 0 {
+ return u, p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return u, p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ // Returning an error here means Huffman decoding errors
+ // for non-indexed strings past the maximum string length
+ // are ignored, but the server is returning an error anyway
+ // and because the string is not indexed the error will not
+ // affect the decoding state.
+ return u, nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return u, p, errNeedMore
+ }
+ u.isHuff = isHuff
+ u.b = p[:strLen]
+ return u, p[strLen:], nil
+}
+
+type undecodedString struct {
+ isHuff bool
+ b []byte
+}
+
+func (d *Decoder) decodeString(u undecodedString) (string, error) {
+ if !u.isHuff {
+ return string(u.b), nil
+ }
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ var s string
+ err := huffmanDecode(buf, d.maxStrLen, u.b)
+ if err == nil {
+ s = buf.String()
+ }
+ buf.Reset() // be nice to GC
+ bufPool.Put(buf)
+ return s, err
+}
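A matching decode sketch for the Decoder above; it builds a small block with the Encoder and decodes it through the emit callback:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Build a small header block to decode.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

	// Decode it; the callback runs for each field, in wire order, during Write.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		panic(err)
	}
	if err := dec.Close(); err != nil { // errors if the block ended mid-field
		panic(err)
	}
}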
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..20d083a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,226 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ rootHuffmanNode := getRootHuffmanNode()
+ n := rootHuffmanNode
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ return nil
+}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's not the first field in the struct).
+type incomparable [0]func()
+
+type node struct {
+ _ incomparable
+
+ // children is non-nil for internal nodes
+ children *[256]*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: new([256]*node)}
+}
+
+var (
+ buildRootOnce sync.Once
+ lazyRootHuffmanNode *node
+)
+
+func getRootHuffmanNode() *node {
+ buildRootOnce.Do(buildRootHuffmanNode)
+ return lazyRootHuffmanNode
+}
+
+func buildRootHuffmanNode() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ lazyRootHuffmanNode = newInternalNode()
+ // allocate a leaf node for each of the 256 symbols
+ leaves := new([256]node)
+
+ for i, code := range huffmanCodes {
+ codeLen := huffmanCodeLen[i]
+
+ cur := lazyRootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for x := start; x < start+end; x++ {
+ leaves[i].sym = byte(i)
+ leaves[i].codeLen = codeLen
+ cur.children[x] = &leaves[i]
+ }
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+//
+// This function is designed to avoid allocations.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ // This relies on the maximum huffman code length being 30 (see the
+ // huffmanCodeLen table), so a uint64 buffer with fewer than 32 valid
+ // bits can always accommodate another code.
+ var (
+ x uint64 // buffer
+ n uint // number of valid bits present in x
+ )
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ n += uint(huffmanCodeLen[c])
+ x <<= huffmanCodeLen[c] % 64
+ x |= uint64(huffmanCodes[c])
+ if n >= 32 {
+ n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift
+ y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32
+ dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
+ }
+ }
+ // Add padding bits if necessary
+ if over := n % 8; over > 0 {
+ const (
+ eosCode = 0x3fffffff
+ eosNBits = 30
+ eosPadByte = eosCode >> (eosNBits - 8)
+ )
+ pad := 8 - over
+ x = (x << pad) | (eosPadByte >> over)
+ n += pad // 8 now divides into n exactly
+ }
+ // n in (0, 8, 16, 24, 32)
+ switch n / 8 {
+ case 0:
+ return dst
+ case 1:
+ return append(dst, byte(x))
+ case 2:
+ y := uint16(x)
+ return append(dst, byte(y>>8), byte(y))
+ case 3:
+ y := uint16(x >> 8)
+ return append(dst, byte(y>>8), byte(y), byte(x))
+ }
+ // case 4:
+ y := uint32(x)
+ return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
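A short round-trip sketch of the exported Huffman helpers above:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	encoded := hpack.AppendHuffmanString(nil, s) // Huffman-coded bytes, padded with EOS bits
	fmt.Println(hpack.HuffmanEncodeLength(s) == uint64(len(encoded))) // true
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	fmt.Println(decoded == s, err) // true <nil>
}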
diff --git a/vendor/golang.org/x/net/http2/hpack/static_table.go b/vendor/golang.org/x/net/http2/hpack/static_table.go
new file mode 100644
index 0000000..754a1eb
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/static_table.go
@@ -0,0 +1,188 @@
+// go generate gen.go
+// Code generated by the command above; DO NOT EDIT.
+
+package hpack
+
+var staticTable = &headerFieldTable{
+ evictCount: 0,
+ byName: map[string]uint64{
+ ":authority": 1,
+ ":method": 3,
+ ":path": 5,
+ ":scheme": 7,
+ ":status": 14,
+ "accept-charset": 15,
+ "accept-encoding": 16,
+ "accept-language": 17,
+ "accept-ranges": 18,
+ "accept": 19,
+ "access-control-allow-origin": 20,
+ "age": 21,
+ "allow": 22,
+ "authorization": 23,
+ "cache-control": 24,
+ "content-disposition": 25,
+ "content-encoding": 26,
+ "content-language": 27,
+ "content-length": 28,
+ "content-location": 29,
+ "content-range": 30,
+ "content-type": 31,
+ "cookie": 32,
+ "date": 33,
+ "etag": 34,
+ "expect": 35,
+ "expires": 36,
+ "from": 37,
+ "host": 38,
+ "if-match": 39,
+ "if-modified-since": 40,
+ "if-none-match": 41,
+ "if-range": 42,
+ "if-unmodified-since": 43,
+ "last-modified": 44,
+ "link": 45,
+ "location": 46,
+ "max-forwards": 47,
+ "proxy-authenticate": 48,
+ "proxy-authorization": 49,
+ "range": 50,
+ "referer": 51,
+ "refresh": 52,
+ "retry-after": 53,
+ "server": 54,
+ "set-cookie": 55,
+ "strict-transport-security": 56,
+ "transfer-encoding": 57,
+ "user-agent": 58,
+ "vary": 59,
+ "via": 60,
+ "www-authenticate": 61,
+ },
+ byNameValue: map[pairNameValue]uint64{
+ {name: ":authority", value: ""}: 1,
+ {name: ":method", value: "GET"}: 2,
+ {name: ":method", value: "POST"}: 3,
+ {name: ":path", value: "/"}: 4,
+ {name: ":path", value: "/index.html"}: 5,
+ {name: ":scheme", value: "http"}: 6,
+ {name: ":scheme", value: "https"}: 7,
+ {name: ":status", value: "200"}: 8,
+ {name: ":status", value: "204"}: 9,
+ {name: ":status", value: "206"}: 10,
+ {name: ":status", value: "304"}: 11,
+ {name: ":status", value: "400"}: 12,
+ {name: ":status", value: "404"}: 13,
+ {name: ":status", value: "500"}: 14,
+ {name: "accept-charset", value: ""}: 15,
+ {name: "accept-encoding", value: "gzip, deflate"}: 16,
+ {name: "accept-language", value: ""}: 17,
+ {name: "accept-ranges", value: ""}: 18,
+ {name: "accept", value: ""}: 19,
+ {name: "access-control-allow-origin", value: ""}: 20,
+ {name: "age", value: ""}: 21,
+ {name: "allow", value: ""}: 22,
+ {name: "authorization", value: ""}: 23,
+ {name: "cache-control", value: ""}: 24,
+ {name: "content-disposition", value: ""}: 25,
+ {name: "content-encoding", value: ""}: 26,
+ {name: "content-language", value: ""}: 27,
+ {name: "content-length", value: ""}: 28,
+ {name: "content-location", value: ""}: 29,
+ {name: "content-range", value: ""}: 30,
+ {name: "content-type", value: ""}: 31,
+ {name: "cookie", value: ""}: 32,
+ {name: "date", value: ""}: 33,
+ {name: "etag", value: ""}: 34,
+ {name: "expect", value: ""}: 35,
+ {name: "expires", value: ""}: 36,
+ {name: "from", value: ""}: 37,
+ {name: "host", value: ""}: 38,
+ {name: "if-match", value: ""}: 39,
+ {name: "if-modified-since", value: ""}: 40,
+ {name: "if-none-match", value: ""}: 41,
+ {name: "if-range", value: ""}: 42,
+ {name: "if-unmodified-since", value: ""}: 43,
+ {name: "last-modified", value: ""}: 44,
+ {name: "link", value: ""}: 45,
+ {name: "location", value: ""}: 46,
+ {name: "max-forwards", value: ""}: 47,
+ {name: "proxy-authenticate", value: ""}: 48,
+ {name: "proxy-authorization", value: ""}: 49,
+ {name: "range", value: ""}: 50,
+ {name: "referer", value: ""}: 51,
+ {name: "refresh", value: ""}: 52,
+ {name: "retry-after", value: ""}: 53,
+ {name: "server", value: ""}: 54,
+ {name: "set-cookie", value: ""}: 55,
+ {name: "strict-transport-security", value: ""}: 56,
+ {name: "transfer-encoding", value: ""}: 57,
+ {name: "user-agent", value: ""}: 58,
+ {name: "vary", value: ""}: 59,
+ {name: "via", value: ""}: 60,
+ {name: "www-authenticate", value: ""}: 61,
+ },
+ ents: []HeaderField{
+ {Name: ":authority", Value: "", Sensitive: false},
+ {Name: ":method", Value: "GET", Sensitive: false},
+ {Name: ":method", Value: "POST", Sensitive: false},
+ {Name: ":path", Value: "/", Sensitive: false},
+ {Name: ":path", Value: "/index.html", Sensitive: false},
+ {Name: ":scheme", Value: "http", Sensitive: false},
+ {Name: ":scheme", Value: "https", Sensitive: false},
+ {Name: ":status", Value: "200", Sensitive: false},
+ {Name: ":status", Value: "204", Sensitive: false},
+ {Name: ":status", Value: "206", Sensitive: false},
+ {Name: ":status", Value: "304", Sensitive: false},
+ {Name: ":status", Value: "400", Sensitive: false},
+ {Name: ":status", Value: "404", Sensitive: false},
+ {Name: ":status", Value: "500", Sensitive: false},
+ {Name: "accept-charset", Value: "", Sensitive: false},
+ {Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false},
+ {Name: "accept-language", Value: "", Sensitive: false},
+ {Name: "accept-ranges", Value: "", Sensitive: false},
+ {Name: "accept", Value: "", Sensitive: false},
+ {Name: "access-control-allow-origin", Value: "", Sensitive: false},
+ {Name: "age", Value: "", Sensitive: false},
+ {Name: "allow", Value: "", Sensitive: false},
+ {Name: "authorization", Value: "", Sensitive: false},
+ {Name: "cache-control", Value: "", Sensitive: false},
+ {Name: "content-disposition", Value: "", Sensitive: false},
+ {Name: "content-encoding", Value: "", Sensitive: false},
+ {Name: "content-language", Value: "", Sensitive: false},
+ {Name: "content-length", Value: "", Sensitive: false},
+ {Name: "content-location", Value: "", Sensitive: false},
+ {Name: "content-range", Value: "", Sensitive: false},
+ {Name: "content-type", Value: "", Sensitive: false},
+ {Name: "cookie", Value: "", Sensitive: false},
+ {Name: "date", Value: "", Sensitive: false},
+ {Name: "etag", Value: "", Sensitive: false},
+ {Name: "expect", Value: "", Sensitive: false},
+ {Name: "expires", Value: "", Sensitive: false},
+ {Name: "from", Value: "", Sensitive: false},
+ {Name: "host", Value: "", Sensitive: false},
+ {Name: "if-match", Value: "", Sensitive: false},
+ {Name: "if-modified-since", Value: "", Sensitive: false},
+ {Name: "if-none-match", Value: "", Sensitive: false},
+ {Name: "if-range", Value: "", Sensitive: false},
+ {Name: "if-unmodified-since", Value: "", Sensitive: false},
+ {Name: "last-modified", Value: "", Sensitive: false},
+ {Name: "link", Value: "", Sensitive: false},
+ {Name: "location", Value: "", Sensitive: false},
+ {Name: "max-forwards", Value: "", Sensitive: false},
+ {Name: "proxy-authenticate", Value: "", Sensitive: false},
+ {Name: "proxy-authorization", Value: "", Sensitive: false},
+ {Name: "range", Value: "", Sensitive: false},
+ {Name: "referer", Value: "", Sensitive: false},
+ {Name: "refresh", Value: "", Sensitive: false},
+ {Name: "retry-after", Value: "", Sensitive: false},
+ {Name: "server", Value: "", Sensitive: false},
+ {Name: "set-cookie", Value: "", Sensitive: false},
+ {Name: "strict-transport-security", Value: "", Sensitive: false},
+ {Name: "transfer-encoding", Value: "", Sensitive: false},
+ {Name: "user-agent", Value: "", Sensitive: false},
+ {Name: "vary", Value: "", Sensitive: false},
+ {Name: "via", Value: "", Sensitive: false},
+ {Name: "www-authenticate", Value: "", Sensitive: false},
+ },
+}
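Since ":method: GET" is entry 2 in the table above, a full name/value match encodes to a single "Indexed Header Field" byte, as this small sketch shows:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	fmt.Printf("% x\n", buf.Bytes()) // 82: indexed representation, static index 2
}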
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..8cbdf3f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,403 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+ // For static tables, entries are never evicted.
+ //
+ // For dynamic tables, entries are evicted from ents[0] and added to the end.
+ // Each entry has a unique id that starts at one and increments for each
+ // entry that is added. This unique id is stable across evictions, meaning
+ // it can be used as a pointer to a specific entry. As in hpack, unique ids
+ // are 1-based. The unique id for ents[k] is k + evictCount + 1.
+ //
+ // Zero is not a valid unique id.
+ //
+ // evictCount should not overflow in any remotely practical situation. In
+ // practice, we will have one dynamic table per HTTP/2 connection. If we
+ // assume a very powerful server that handles 1M QPS per connection and each
+ // request adds (then evicts) 100 entries from the table, it would still take
+ // 2M years for evictCount to overflow.
+ ents []HeaderField
+ evictCount uint64
+
+ // byName maps a HeaderField name to the unique id of the newest entry with
+ // the same name. See above for a definition of "unique id".
+ byName map[string]uint64
+
+ // byNameValue maps a HeaderField name/value pair to the unique id of the newest
+ // entry with the same name and value. See above for a definition of "unique id".
+ byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+ name, value string
+}
+
+func (t *headerFieldTable) init() {
+ t.byName = make(map[string]uint64)
+ t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+ return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+ id := uint64(t.len()) + t.evictCount + 1
+ t.byName[f.Name] = id
+ t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+ t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+ if n > t.len() {
+ panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+ }
+ for k := 0; k < n; k++ {
+ f := t.ents[k]
+ id := t.evictCount + uint64(k) + 1
+ if t.byName[f.Name] == id {
+ delete(t.byName, f.Name)
+ }
+ if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+ delete(t.byNameValue, p)
+ }
+ }
+ copy(t.ents, t.ents[n:])
+ for k := t.len() - n; k < t.len(); k++ {
+ t.ents[k] = HeaderField{} // so strings can be garbage collected
+ }
+ t.ents = t.ents[:t.len()-n]
+ if t.evictCount+uint64(n) < t.evictCount {
+ panic("evictCount overflow")
+ }
+ t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be dynamic tables except for the global staticTable.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ if !f.Sensitive {
+ if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+ return t.idToIndex(id), true
+ }
+ }
+ if id := t.byName[f.Name]; id != 0 {
+ return t.idToIndex(id), false
+ }
+ return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
+func (t *headerFieldTable) idToIndex(id uint64) uint64 {
+ if id <= t.evictCount {
+ panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
+ }
+ k := id - t.evictCount - 1 // convert id to an index t.ents[k]
+ if t != staticTable {
+ return uint64(t.len()) - k // dynamic table
+ }
+ return k + 1
+}
+
+var huffmanCodes = [256]uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
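The index math above places the newest dynamic-table entry just past the 61 static entries, so repeating a custom header collapses to a one-byte indexed reference; a small sketch (the header name is made up):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	f := hpack.HeaderField{Name: "x-trace-id", Value: "abc123"}
	enc.WriteField(f) // literal with incremental indexing; the field enters the dynamic table
	buf.Reset()
	enc.WriteField(f) // now a full match at dynamic index 62 (61 static entries + 1)
	fmt.Printf("% x\n", buf.Bytes()) // be: 0x80 | 62
}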
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..6c18ea2
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,432 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+package http2 // import "golang.org/x/net/http2"
+
+import (
+ "bufio"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+ inTests bool
+
+ // Enabling extended CONNECT by default causes browsers to attempt to use
+ // WebSockets-over-HTTP/2. This results in problems when the server's websocket
+ // package doesn't support extended CONNECT.
+ //
+ // Disable extended CONNECT by default for now.
+ //
+ // Issue #71128.
+ disableExtendedConnectProtocol = true
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+ if strings.Contains(e, "http2xconnect=1") {
+ disableExtendedConnectProtocol = false
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // https://httpwg.org/specs/rfc7540.html#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See https://httpwg.org/specs/rfc7540.html#SettingFormat
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingEnableConnectProtocol:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// https://httpwg.org/specs/rfc7540.html#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+ SettingEnableConnectProtocol SettingID = 0x8
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httpguts.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+//
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httpguts.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func httpCodeString(code int) string {
+ switch code {
+ case 200:
+ return "200"
+ case 404:
+ return "404"
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and have the Mutex and Cond's memory in the same
+// allocation.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
+
+// bufferedWriter is a buffered writer that writes to conn.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ _ incomparable
+ group synctestGroupInterface // immutable
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
+}
+
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+ return &bufferedWriter{
+ group: group,
+ conn: conn,
+ byteTimeout: timeout,
+ }
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
+ },
+}
+
+func (w *bufferedWriter) Available() int {
+ if w.bw == nil {
+ return bufWriterPoolBufferSize
+ }
+ return w.bw.Available()
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset((*bufferedWriterTimeoutWriter)(w))
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
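+
+// Note: the Write/Flush pairing above means an idle connection holds no
+// bufio.Writer at all; the pooled buffer only exists between the first
+// buffered Write and the next Flush, which returns it to bufWriterPool.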
+
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+ return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+ if timeout <= 0 {
+ return conn.Write(p)
+ }
+ for {
+ var now time.Time
+ if group == nil {
+ now = time.Now()
+ } else {
+ now = group.Now()
+ }
+ conn.SetWriteDeadline(now.Add(timeout))
+ nn, err := conn.Write(p[n:])
+ n += nn
+ if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+ // Either we finished the write, made no progress, or hit an
+ // error other than the write deadline expiring. Whichever it
+ // is, we're done now.
+ conn.SetWriteDeadline(time.Time{})
+ return n, err
+ }
+ }
+}
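+
+// In other words, the timeout above is per unit of progress rather than for
+// the write as a whole: a large write may take longer than timeout in total,
+// as long as at least one byte is written within each timeout window.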
+
+func mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
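+
+// For example, bodyAllowedForStatus(204) and bodyAllowedForStatus(304)
+// return false, while 200 and 404 responses are permitted to carry a body.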
+
+type httpError struct {
+ _ incomparable
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *sorter) Len() int { return len(s.v) }
+func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+ // Our sorter works on s.v, which sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
+// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
+// It's defined as an interface here to let us keep synctestGroup entirely test-only
+// and not a part of non-test builds.
+type synctestGroupInterface interface {
+ Join()
+ Now() time.Time
+ NewTimer(d time.Duration) timer
+ AfterFunc(d time.Duration, f func()) timer
+ ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..3b9f06b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,184 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer // nil when done reading
+ unread int // bytes unread when done
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
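+
+// A minimal usage sketch (assuming a *bytes.Buffer as the pipeBuffer, which
+// satisfies Len/Read/Write):
+//
+//    var p pipe
+//    p.setBuffer(new(bytes.Buffer))
+//    go func() { p.Write([]byte("data")) }()
+//    buf := make([]byte, 4)
+//    n, err := p.Read(buf)    // blocks until data, CloseWithError, or BreakWithError
+//    p.CloseWithError(io.EOF) // later Reads drain remaining data, then return io.EOF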
+
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *pipe) setBuffer(b pipeBuffer) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.err != nil || p.breakErr != nil {
+ return
+ }
+ p.b = b
+}
+
+func (p *pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.b == nil {
+ return p.unread
+ }
+ return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b != nil && p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ p.b = nil
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var (
+ errClosedPipeWrite = errors.New("write on closed buffer")
+ errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
+)
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil || p.breakErr != nil {
+ return 0, errClosedPipeWrite
+ }
+ // It's possible that pipe.setBuffer was never invoked, leaving
+ // the buffer uninitialized. We shouldn't try to write to an
+ // uninitialized pipe, but returning an error is better than panicking.
+ if p.b == nil {
+ return 0, errUninitializedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
+ p.b = nil
+ }
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..51fca38
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,3350 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
+ maxQueuedControlFrames = 10000
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ // If zero or negative, there is no timeout.
+ IdleTimeout time.Duration
+
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connection. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() WriteScheduler
+
+ // CountError, if non-nil, is called on HTTP/2 server errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which will make the
+ // struct non-copyable, which might break some callers.
+ state *serverInternalState
+
+ // Synchronization group used for testing.
+ // Outside of tests, this is nil.
+ group synctestGroupInterface
+}
+
+func (s *Server) markNewGoroutine() {
+ if s.group != nil {
+ s.group.Join()
+ }
+}
+
+func (s *Server) now() time.Time {
+ if s.group != nil {
+ return s.group.Now()
+ }
+ return time.Now()
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (s *Server) newTimer(d time.Duration) timer {
+ if s.group != nil {
+ return s.group.NewTimer(d)
+ }
+ return timeTimer{time.NewTimer(d)}
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (s *Server) afterFunc(d time.Duration, f func()) timer {
+ if s.group != nil {
+ return s.group.AfterFunc(d, f)
+ }
+ return timeTimer{time.AfterFunc(d, f)}
+}
+
+type serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*serverConn]struct{}
+}
+
+func (s *serverInternalState) registerConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) unregisterConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
+ if conf == nil {
+ conf = new(Server)
+ }
+ conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ if h1, h2 := s, conf; h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ s.RegisterOnShutdown(conf.state.startGracefulShutdown)
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
+ // If they already provided a TLS 1.0–1.2 CipherSuite list, return an
+ // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
+ // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
+ haveRequired := false
+ for _, cs := range s.TLSConfig.CipherSuites {
+ switch cs {
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ // Alternative MTI cipher to not discourage ECDSA-only servers.
+ // See http://golang.org/cl/30721 for further information.
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ haveRequired = true
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
+ }
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ // The TLSNextProto interface predates contexts, so
+ // the net/http package passes down its per-connection
+ // base context via an exported but unadvertised
+ // method on the Handler. This is for internal
+ // net/http<=>http2 use only.
+ var ctx context.Context
+ type baseContexter interface {
+ BaseContext() context.Context
+ }
+ if bc, ok := h.(baseContexter); ok {
+ ctx = bc.BaseContext()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ SawClientPreface: sawClientPreface,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler(hs, c, h, false)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ //
+ // A connection passed in this method has already had the HTTP/2 preface read from it.
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ if lg := hs.ErrorLog; lg != nil {
+ lg.Print(err)
+ } else {
+ log.Print(err)
+ }
+ go c.Close()
+ return
+ }
+ protoHandler(hs, nc, h, true)
+ }
+ return nil
+}
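+
+// A minimal usage sketch (srv, mux, and the certificate paths are
+// placeholders, not part of this package):
+//
+//    srv := &http.Server{Addr: ":8443", Handler: mux}
+//    if err := ConfigureServer(srv, &Server{}); err != nil {
+//        log.Fatal(err)
+//    }
+//    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))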
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // Context is the base context to use.
+ // If nil, context.Background is used.
+ Context context.Context
+
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+
+ // UpgradeRequest is an initial request received on a connection
+ // undergoing an h2c upgrade. The request body must have been
+ // completely read from the connection before calling ServeConn,
+ // and the 101 Switching Protocols response written.
+ UpgradeRequest *http.Request
+
+ // Settings is the decoded contents of the HTTP2-Settings header
+ // in an h2c upgrade request.
+ Settings []byte
+
+ // SawClientPreface is set if the HTTP/2 connection preface
+ // has already been read from the connection.
+ SawClientPreface bool
+}
+
+func (o *ServeConnOpts) context() context.Context {
+ if o != nil && o.Context != nil {
+ return o.Context
+ }
+ return context.Background()
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ s.serveConn(c, opts, nil)
+}
+
+func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
+ sc := &serverConn{
+ srv: s,
+ hs: http1srv,
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: conf.MaxConcurrentStreams,
+ initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ sawClientPreface: opts.SawClientPreface,
+ }
+ if newf != nil {
+ newf(sc)
+ }
+
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
+ if sc.hs.WriteTimeout > 0 {
+ sc.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = newRoundRobinWriteScheduler()
+ }
+
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
+ sc.flow.add(initialWindowSize)
+ sc.inflow.init(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
+
+ fr := NewFramer(sc.bw, c)
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
+ }
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
+ }
+
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if opts.Settings != nil {
+ fr := &SettingsFrame{
+ FrameHeader: FrameHeader{valid: true},
+ p: opts.Settings,
+ }
+ if err := fr.ForeachSetting(sc.processSetting); err != nil {
+ sc.rejectConn(ErrCodeProtocol, "invalid settings")
+ return
+ }
+ opts.Settings = nil
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+
+ if opts.UpgradeRequest != nil {
+ sc.upgradeRequest(opts.UpgradeRequest)
+ opts.UpgradeRequest = nil
+ }
+
+ sc.serve(conf)
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
+ ctx, cancel = context.WithCancel(opts.context())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ baseCtx context.Context
+ framer *Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
+ flow outflow // conn-wide (not stream-specific) outbound flow control
+ inflow inflow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+ writeSched WriteScheduler
+ countErrorFunc func(errType string)
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawClientPreface bool // preface has already been read, used in h2c upgrade
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ queuedControlFrames int // control frames in the writeSched queue
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ curHandlers uint32 // number of running handler goroutines
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*stream
+ unstartedHandlers []unstartedHandler
+ initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
+ maxFrameSize int32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ canonHeaderKeysSize int // canonHeader keys size in bytes
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
+ goAwayCode ErrCode
+ shutdownTimer timer // nil until used
+ idleTimer timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer timer // nil if unused
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
+}
+
+func (sc *serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed when the stream transitions to the closed state
+ ctx context.Context
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow outflow // limits writing from Handler to client
+ inflow inflow // what the client is allowed to POST/etc to us
+ state streamState
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline timer // nil if unused
+ writeDeadline timer // nil if unused
+ closeErr error // set before cw is closed
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://tools.ietf.org/html/rfc7540#section-5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return stateClosed, nil
+ }
+ }
+ return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if errors.Is(err, net.ErrClosed) {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
+// of the entries in the canonHeader cache.
+// This should be larger than the size of unique, uncommon header keys likely to
+// be sent by the peer, while not so high as to permit unreasonable memory usage
+// if the peer sends an unbounded number of unique header keys.
+const maxCachedCanonicalHeadersKeysSize = 2048
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := httpcommon.CachedCanonicalHeader(v)
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
+ if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
+ sc.canonHeader[v] = cv
+ sc.canonHeaderKeysSize += size
+ }
+ return cv
+}
+
+type readFrameResult struct {
+ f Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ sc.srv.markNewGoroutine()
+ gate := make(chan struct{})
+ gateDone := func() { gate <- struct{}{} }
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if terminalReadFrameError(err) {
+ return
+ }
+ }
+}
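+
+// Note: the unbuffered gate channel above means at most one frame is
+// outstanding at a time. The serve loop must call readMore (which sends on
+// gate) before ReadFrame is called again, so the Framer's frame memory can
+// be safely reused.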
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+ _ incomparable
+ wr FrameWriteRequest // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+ sc.srv.markNewGoroutine()
+ var err error
+ if wd == nil {
+ err = wr.write.writeFrame(sc)
+ } else {
+ err = sc.framer.endWrite()
+ }
+ sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve(conf http2Config) {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ settings := writeSettings{
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
+ }
+ if !disableExtendedConnectProtocol {
+ settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
+ }
+ sc.writeFrame(FrameWriteRequest{
+ write: settings,
+ })
+ sc.unackedSettings++
+
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(http.StateActive)
+ sc.setConnState(http.StateIdle)
+
+ if sc.srv.IdleTimeout > 0 {
+ sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ defer sc.idleTimer.Stop()
+ }
+
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ defer settingsTimer.Stop()
+
+ lastFrameTime := sc.srv.now()
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wr := <-sc.wantWriteFrameCh:
+ if se, ok := wr.write.(StreamError); ok {
+ sc.resetStream(se)
+ break
+ }
+ sc.writeFrame(wr)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ lastFrameTime = sc.srv.now()
+ // Process any written frames before reading new frames from the client since a
+ // written frame could have triggered a new stream to be started.
+ if sc.writingFrameAsync {
+ select {
+ case wroteRes := <-sc.wroteFrameCh:
+ sc.wroteFrame(wroteRes)
+ default:
+ }
+ }
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer != nil {
+ settingsTimer.Stop()
+ settingsTimer = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case msg := <-sc.serveMsgCh:
+ switch v := msg.(type) {
+ case func(int):
+ v(loopNum) // for testing
+ case *serverMessage:
+ switch v {
+ case settingsTimerMsg:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case idleTimerMsg:
+ sc.vlogf("connection is idle")
+ sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
+ case shutdownTimerMsg:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case gracefulShutdownMsg:
+ sc.startGracefulShutdownInternal()
+ case handlerDoneMsg:
+ sc.handlerDone()
+ default:
+ panic("unknown timer")
+ }
+ case *startPushRequest:
+ sc.startPush(v)
+ case func(*serverConn):
+ v(sc)
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+ }
+
+ // If the peer is causing us to generate a lot of control frames,
+ // but not reading them from us, assume they are trying to make us
+ // run out of memory.
+ if sc.queuedControlFrames > maxQueuedControlFrames {
+ sc.vlogf("http2: too many control frames in send queue, closing connection")
+ return
+ }
+
+ // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
+ // with no error code (graceful shutdown), don't start the timer until
+ // all open streams have been completed.
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
+ gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
+ if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
+ sc.shutDownIn(goAwayTimeout)
+ }
+ }
+}
+
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.logf("timeout waiting for PING response")
+ if f := sc.countErrorFunc; f != nil {
+ f("conn_close_lost_ping")
+ }
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := sc.srv.now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+ // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
+ // is we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
+type serverMessage int
+
+// Message values sent to serveMsgCh.
+var (
+ settingsTimerMsg = new(serverMessage)
+ idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
+ shutdownTimerMsg = new(serverMessage)
+ gracefulShutdownMsg = new(serverMessage)
+ handlerDoneMsg = new(serverMessage)
+)
+
+func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
+func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
+func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
+
+func (sc *serverConn) sendServeMsg(msg interface{}) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.serveMsgCh <- msg:
+ case <-sc.doneServing:
+ }
+}
+
+var errPrefaceTimeout = errors.New("timeout waiting for client preface")
+
+// readPreface reads the ClientPreface greeting from the peer or
+// returns errPrefaceTimeout on timeout, or an error if the greeting
+// is invalid.
+func (sc *serverConn) readPreface() error {
+ if sc.sawClientPreface {
+ return nil
+ }
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C():
+ return errPrefaceTimeout
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
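+
+// The greeting checked above is the 24-byte client connection preface,
+// "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" (exposed as ClientPreface), per RFC 7540
+// Section 3.5.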
+
+var errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+ New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+ ch := errChanPool.Get().(chan error)
+ writeArg := writeDataPool.Get().(*writeData)
+ *writeArg = writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(FrameWriteRequest{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return errStreamClosed
+ }
+ }
+ errChanPool.Put(ch)
+ if frameWriteDone {
+ writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wr:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
+ sc.serveG.check()
+
+ // If true, wr will not be written and wr.done will not be signaled.
+ var ignoreWrite bool
+
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
+ if wr.StreamID() != 0 {
+ _, isReset := wr.write.(StreamError)
+ if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
+ ignoreWrite = true
+ }
+ }
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wr.write.(type) {
+ case *writeResHeaders:
+ wr.stream.wroteHeaders = true
+ case write100ContinueHeadersFrame:
+ if wr.stream.wroteHeaders {
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
+ if wr.done != nil {
+ panic("wr.done != nil for write100ContinueHeadersFrame")
+ }
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ if wr.isControl() {
+ sc.queuedControlFrames++
+ // For extra safety, detect wraparounds, which should not happen,
+ // and pull the plug.
+ if sc.queuedControlFrames < 0 {
+ sc.conn.Close()
+ }
+ }
+ sc.writeSched.Push(wr)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wr (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wr.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ switch wr.write.(type) {
+ case StreamError, handlerPanicRST, writeWindowUpdate:
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
+ }
+ case stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(frameWriteResult{wr: wr, err: err})
+ } else if wd, ok := wr.write.(*writeData); ok {
+ // Encode the frame in the serve goroutine, to ensure we don't have
+ // any lingering asynchronous references to data passed to Write.
+ // See https://go.dev/issue/58446.
+ sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr, wd)
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr, nil)
+ }
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in the
+// main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.writingFrameAsync = false
+
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
+ wr := res.wr
+
+ if writeEndsStream(wr.write) {
+ st := wr.stream
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(streamError(st.id, ErrCodeNo))
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ } else {
+ switch v := wr.write.(type) {
+ case StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case handlerPanicRST:
+ sc.closeStream(wr.stream, errHandlerPanicked)
+ }
+ }
+
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame || sc.inFrameScheduleLoop {
+ return
+ }
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(FrameWriteRequest{
+ write: &writeGoAway{
+ maxStreamID: sc.maxClientStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+ continue
+ }
+ if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ if wr.isControl() {
+ sc.queuedControlFrames--
+ }
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
+ }
+ sc.inFrameScheduleLoop = false
+}
+
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
+func (sc *serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
+}
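+
+// For callers, graceful shutdown is normally reached through the standard
+// library rather than by calling startGracefulShutdown directly: when the
+// server was set up with ConfigureServer (or net/http's bundled HTTP/2),
+// (*http.Server).Shutdown triggers it. An illustrative, non-normative sketch
+// (mux and the file names are placeholders):
+//
+//	srv := &http.Server{Addr: ":8443", Handler: mux}
+//	go srv.ListenAndServeTLS("cert.pem", "key.pem")
+//	// ... later, e.g. on SIGTERM:
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	_ = srv.Shutdown(ctx) // sends GOAWAY(NO_ERROR); open streams may finish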
+
+// After sending GOAWAY with an error code (non-graceful shutdown), the
+// connection will close after goAwayTimeout.
+//
+// If we close the connection immediately after sending GOAWAY, there may
+// be unsent data in our kernel receive buffer, which will cause the kernel
+// to send a TCP RST on close() instead of a FIN. This RST will abort the
+// connection immediately, whether or not the client had received the GOAWAY.
+//
+// Ideally we should delay for at least 1 RTT + epsilon so the client has
+// a chance to read the GOAWAY and stop sending messages. Measuring RTT
+// is hard, so we approximate with 1 second. See golang.org/issue/18701.
+//
+// This is a var so it can be shorter in tests, where all requests use the
+// loopback interface, making the expected RTT very small.
+//
+// TODO: configurable?
+var goAwayTimeout = 1 * time.Second
+
+func (sc *serverConn) startGracefulShutdownInternal() {
+ sc.goAway(ErrCodeNo)
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ if sc.goAwayCode == ErrCodeNo {
+ sc.goAwayCode = code
+ }
+ return
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(FrameWriteRequest{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.resetQueued = true
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ if res.f != nil {
+ if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
+ sc.maxClientStreamID = id
+ }
+ }
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
+ }
+ sc.sawFirstSettings = true
+ }
+
+ // Discard frames for streams initiated after the identified last
+ // stream sent in a GOAWAY, or all frames after sending an error.
+ // We still need to return connection-level flow control for DATA frames.
+ // RFC 9113 Section 6.8.
+ if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
+
+ if f, ok := f.(*DataFrame); ok {
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ }
+ return nil
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *GoAwayFrame:
+ return sc.processGoAway(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
+ }
+ sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
+ }
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
+ }
+ if st != nil {
+ st.cancelCtx()
+ sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ if st.readDeadline != nil {
+ st.readDeadline.Stop()
+ }
+ if st.writeDeadline != nil {
+ st.writeDeadline.Stop()
+ }
+ if st.isPushed() {
+ sc.curPushedStreams--
+ } else {
+ sc.curClientStreams--
+ }
+ delete(sc.streams, st.id)
+ if len(sc.streams) == 0 {
+ sc.setConnState(http.StateIdle)
+ if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
+ if h1ServerKeepAlivesDisabled(sc.hs) {
+ sc.startGracefulShutdownInternal()
+ }
+ }
+ if p := st.body; p != nil {
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
+ sc.sendWindowUpdate(nil, p.Len())
+
+ p.CloseWithError(err)
+ }
+ if e, ok := err.(StreamError); ok {
+ if e.Cause != nil {
+ err = e.Cause
+ } else {
+ err = errStreamClosed
+ }
+ }
+ st.closeErr = err
+ st.cancelCtx()
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.CloseStream(st.id)
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
+ }
+ return nil
+ }
+ if f.NumSettings() > 100 || f.HasDuplicates() {
+ // This isn't actually in the spec, but hang up on
+ // suspiciously large settings frames or those with
+ // duplicate entries.
+ return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+ // acknowledged individually, even if multiple are received before the ACK.
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
+ case SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ case SettingEnableConnectProtocol:
+ // Receipt of this parameter by a server does not
+ // have any impact
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+
+ data := f.Data()
+ state, st := sc.state(id)
+ if id == 0 || state == stateIdle {
+ // Section 6.1: "DATA frames MUST be associated with a
+ // stream. If a DATA frame is received whose stream
+ // identifier field is 0x0, the recipient MUST respond
+ // with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ //
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
+ }
+
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ if st != nil && st.resetQueued {
+ // Already have a stream error in flight. Don't send another.
+ return nil
+ }
+ return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
+ // value of a content-length header field does not equal the sum of the
+ // DATA frame payload lengths that form the body.
+ return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
+ }
+ if f.Length > 0 {
+ // Check whether the client has flow control quota.
+ if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
+ return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
+ }
+
+ if len(data) > 0 {
+ st.bodyBytes += int64(len(data))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ // The handler has closed the request body.
+ // Return the connection-level flow control for the discarded data,
+ // but not the stream-level flow control.
+ sc.sendWindowUpdate(nil, int(f.Length)-wrote)
+ return nil
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ }
+
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ // Call sendWindowUpdate even if there is no padding,
+ // to return buffered flow control credit if the sent
+ // window has shrunk.
+ pad := int32(f.Length) - int32(len(data))
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+// onReadTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's ReadTimeout has fired.
+func (st *stream) onReadTimeout() {
+ if st.body != nil {
+ // Wrap the ErrDeadlineExceeded to avoid callers depending on us
+ // returning the bare error.
+ st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ }
+}
+
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
+ StreamID: st.id,
+ Code: ErrCodeInternal,
+ Cause: os.ErrDeadlineExceeded,
+ }})
+}
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.StreamID
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
+ // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+ // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+ // this state, it MUST respond with a stream error (Section 5.4.2) of
+ // type STREAM_CLOSED.
+ if st.state == stateHalfClosedRemote {
+ return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
+ }
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxClientStreamID {
+ return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
+ }
+ sc.maxClientStreamID = id
+
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
+ if sc.curClientStreams+1 > sc.advMaxStreams {
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
+ }
+
+ initialState := stateOpen
+ if f.StreamEnded() {
+ initialState = stateHalfClosedRemote
+ }
+ st := sc.newStream(id, 0, initialState)
+
+ if f.HasPriority() {
+ if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(st.id, f.Priority)
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = handleHeaderListTooLong
+ } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
+ handler = new400Handler(err)
+ }
+
+ // The net/http package sets the read deadline from the
+ // http.Server.ReadTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's a more sane option anyway.
+ if sc.hs.ReadTimeout > 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ }
+
+ return sc.scheduleHandler(id, rw, req, handler)
+}
+
+func (sc *serverConn) upgradeRequest(req *http.Request) {
+ sc.serveG.check()
+ id := uint32(1)
+ sc.maxClientStreamID = id
+ st := sc.newStream(id, 0, stateHalfClosedRemote)
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ rw := sc.newResponseWriter(st, req)
+
+ // Disable any read deadline set by the net/http package
+ // prior to the upgrade.
+ if sc.hs.ReadTimeout > 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ }
+
+ // This is the first request on the connection,
+ // so start the handler directly rather than going
+ // through scheduleHandler.
+ sc.curHandlers++
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+}
+
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !httpguts.ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
+ if streamID == p.StreamDep {
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
+ return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
+ }
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+ sc.serveG.check()
+ if id == 0 {
+ panic("internal error: cannot create stream with id 0")
+ }
+
+ ctx, cancelCtx := context.WithCancel(sc.baseCtx)
+ st := &stream{
+ sc: sc,
+ id: id,
+ state: state,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ st.cw.Init()
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.init(sc.initialStreamRecvWindowSize)
+ if sc.hs.WriteTimeout > 0 {
+ st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ }
+
+ sc.streams[id] = st
+ sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+ if st.isPushed() {
+ sc.curPushedStreams++
+ } else {
+ sc.curClientStreams++
+ }
+ if sc.curOpenStreams() == 1 {
+ sc.setConnState(http.StateActive)
+ }
+
+ return st
+}
+
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ rp := httpcommon.ServerRequestParam{
+ Method: f.PseudoValue("method"),
+ Scheme: f.PseudoValue("scheme"),
+ Authority: f.PseudoValue("authority"),
+ Path: f.PseudoValue("path"),
+ Protocol: f.PseudoValue("protocol"),
+ }
+
+ // extended connect is disabled, so we should not see :protocol
+ if disableExtendedConnectProtocol && rp.Protocol != "" {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
+ }
+
+ isConnect := rp.Method == "CONNECT"
+ if isConnect {
+ if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
+ }
+ } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
+ }
+
+ header := make(http.Header)
+ rp.Header = header
+ for _, hf := range f.RegularFields() {
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+ if rp.Authority == "" {
+ rp.Authority = header.Get("Host")
+ }
+ if rp.Protocol != "" {
+ header.Set(":protocol", rp.Protocol)
+ }
+
+ rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+ if err != nil {
+ return nil, nil, err
+ }
+ bodyOpen := !f.StreamEnded()
+ if bodyOpen {
+ if vv, ok := rp.Header["Content-Length"]; ok {
+ if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
+ req.ContentLength = int64(cl)
+ } else {
+ req.ContentLength = 0
+ }
+ } else {
+ req.ContentLength = -1
+ }
+ req.Body.(*requestBody).pipe = &pipe{
+ b: &dataBuffer{expected: req.ContentLength},
+ }
+ }
+ return rw, req, nil
+}
+
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.Scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ res := httpcommon.NewServerRequest(rp)
+ if res.InvalidReason != "" {
+ return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
+ }
+
+ body := &requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: res.NeedsContinue,
+ }
+ req := (&http.Request{
+ Method: rp.Method,
+ URL: res.URL,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: rp.Header,
+ RequestURI: res.RequestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: rp.Authority,
+ Body: body,
+ Trailer: res.Trailer,
+ }).WithContext(st.ctx)
+ rw := sc.newResponseWriter(st, req)
+ return rw, req, nil
+}
+
+func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ return &responseWriter{rws: rws}
+}
+
+type unstartedHandler struct {
+ streamID uint32
+ rw *responseWriter
+ req *http.Request
+ handler func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+ sc.serveG.check()
+ maxHandlers := sc.advMaxStreams
+ if sc.curHandlers < maxHandlers {
+ sc.curHandlers++
+ go sc.runHandler(rw, req, handler)
+ return nil
+ }
+ if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+ return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+ }
+ sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+ streamID: streamID,
+ rw: rw,
+ req: req,
+ handler: handler,
+ })
+ return nil
+}
+
+func (sc *serverConn) handlerDone() {
+ sc.serveG.check()
+ sc.curHandlers--
+ i := 0
+ maxHandlers := sc.advMaxStreams
+ for ; i < len(sc.unstartedHandlers); i++ {
+ u := sc.unstartedHandlers[i]
+ if sc.streams[u.streamID] == nil {
+ // This stream was reset before its goroutine had a chance to start.
+ continue
+ }
+ if sc.curHandlers >= maxHandlers {
+ break
+ }
+ sc.curHandlers++
+ go sc.runHandler(u.rw, u.req, u.handler)
+ sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+ }
+ sc.unstartedHandlers = sc.unstartedHandlers[i:]
+ if len(sc.unstartedHandlers) == 0 {
+ sc.unstartedHandlers = nil
+ }
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ sc.srv.markNewGoroutine()
+ defer sc.sendServeMsg(handlerDoneMsg)
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if req.MultipartForm != nil {
+ req.MultipartForm.RemoveAll()
+ }
+ if didPanic {
+ e := recover()
+ sc.writeFrameFromHandler(FrameWriteRequest{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ // Same as net/http:
+ if e != nil && e != http.ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ }
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "HTTP Error 431 Request Header Field(s) Too Large
")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // If there's a header map (which we don't own), we have to block on
+ // waiting for this frame to be written, so that an http.Flush mid-handler
+ // writes out the correct value of keys, before a handler later potentially
+ // mutates it.
+ errc = errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(FrameWriteRequest{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(FrameWriteRequest{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
+ sc.serveG.checkNotOn() // NOT on
+ if n > 0 {
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+ }
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.sendWindowUpdate(st, int(n))
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ var streamID uint32
+ var send int32
+ if st == nil {
+ send = sc.inflow.add(n)
+ } else {
+ streamID = st.id
+ send = st.inflow.add(n)
+ }
+ if send == 0 {
+ return
+ }
+ sc.writeFrame(FrameWriteRequest{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(send)},
+ stream: st,
+ })
+}
+
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
+type requestBody struct {
+ _ incomparable
+ stream *stream
+ conn *serverConn
+ closeOnce sync.Once // for use by Close only
+ sawEOF bool // for use by Read only
+ pipe *pipe // non-nil if we have an HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ b.closeOnce.Do(func() {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(errClosedBody)
+ }
+ })
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil || b.sawEOF {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if err == io.EOF {
+ b.sawEOF = true
+ }
+ if b.conn == nil && inTests {
+ return
+ }
+ b.conn.noteBodyReadFromHandler(b.stream, n, err)
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.rws.writeChunk(p)
+ if err == errStreamClosed {
+ // If writing failed because the stream has been closed,
+ // return the reason it was closed.
+ err = cw.rws.stream.closeErr
+ }
+ return n, err
+}
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
+
+func (rws *responseWriterState) hasNonemptyTrailers() bool {
+ for _, trailer := range rws.trailers {
+ if _, ok := rws.handlerHeader[trailer]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
+ rws.sentContentLen = int64(cl)
+ } else {
+ clen = ""
+ }
+ }
+ _, hasContentLength := rws.snapHeader["Content-Length"]
+ if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ // If the Content-Encoding is non-blank, we shouldn't
+ // sniff the body. See Issue golang.org/issue/31753.
+ ce := rws.snapHeader.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
+ ctype = http.DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
+ // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
+ // down the TCP connection when idle, like we do for HTTP/1.
+ // TODO: remove more Connection-specific header fields here, in addition
+ // to "Connection".
+ if _, ok := rws.snapHeader["Connection"]; ok {
+ v := rws.snapHeader.Get("Connection")
+ delete(rws.snapHeader, "Connection")
+ if v == "close" {
+ rws.conn.startGracefulShutdown()
+ }
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ // only send trailers if they have actually been defined by the
+ // server handler.
+ hasNonemptyTrailers := rws.hasNonemptyTrailers()
+ endStream := rws.handlerDone && !hasNonemptyTrailers
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && hasNonemptyTrailers {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
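+
+// An illustrative, non-normative sketch of a handler using this prefix to set
+// a trailer whose value is only known after the body has been written
+// ("Body-Bytes" is a made-up trailer name):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		n, _ := io.Copy(w, r.Body) // response headers are flushed on first write
+//		// Promoted to the "Body-Bytes" trailer after ServeHTTP returns.
+//		w.Header().Set("Trailer:Body-Bytes", strconv.FormatInt(n, 10))
+//	}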
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 7230
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
+ st := w.rws.stream
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ // If we're setting a deadline in the past, reset the stream immediately
+ // so reads after SetReadDeadline returns will fail.
+ st.onReadTimeout()
+ return nil
+ }
+ w.rws.conn.sendServeMsg(func(sc *serverConn) {
+ if st.readDeadline != nil {
+ if !st.readDeadline.Stop() {
+ // Deadline already exceeded, or stream has been closed.
+ return
+ }
+ }
+ if deadline.IsZero() {
+ st.readDeadline = nil
+ } else if st.readDeadline == nil {
+ st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ } else {
+ st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ }
+ })
+ return nil
+}
+
+func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
+ st := w.rws.stream
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ // If we're setting a deadline in the past, reset the stream immediately
+ // so writes after SetWriteDeadline returns will fail.
+ st.onWriteTimeout()
+ return nil
+ }
+ w.rws.conn.sendServeMsg(func(sc *serverConn) {
+ if st.writeDeadline != nil {
+ if !st.writeDeadline.Stop() {
+ // Deadline already exceeded, or stream has been closed.
+ return
+ }
+ }
+ if deadline.IsZero() {
+ st.writeDeadline = nil
+ } else if st.writeDeadline == nil {
+ st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ } else {
+ st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ }
+ })
+ return nil
+}
+
+func (w *responseWriter) EnableFullDuplex() error {
+ // We always support full duplex responses, so this is a no-op.
+ return nil
+}
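+
+// None of SetReadDeadline, SetWriteDeadline or EnableFullDuplex are part of
+// the http.ResponseWriter interface; handlers reach them through
+// http.NewResponseController (Go 1.20+, EnableFullDuplex in Go 1.21+).
+// An illustrative, non-normative sketch:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		rc := http.NewResponseController(w)
+//		// Give a slow upload extra time beyond the server-wide ReadTimeout.
+//		_ = rc.SetReadDeadline(time.Now().Add(time.Minute))
+//		_, _ = io.Copy(io.Discard, r.Body)
+//	}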
+
+func (w *responseWriter) Flush() {
+ w.FlushError()
+}
+
+func (w *responseWriter) FlushError() error {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ var err error
+ if rws.bw.Buffered() > 0 {
+ err = rws.bw.Flush()
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk with zero bytes), so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ _, err = chunkWriter{rws}.Write(nil)
+ if err == nil {
+ select {
+ case <-rws.stream.cw:
+ err = rws.stream.closeErr
+ default:
+ }
+ }
+ }
+ return err
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ cw := rws.stream.cw
+ go func() {
+ cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
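+
+// CloseNotify is retained for compatibility; the request context is the
+// modern equivalent and is cancelled in the same situations (closeStream
+// calls the stream's cancelCtx). An illustrative, non-normative sketch,
+// where doWork is a hypothetical helper returning a result channel:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		select {
+//		case <-r.Context().Done():
+//			return // client went away or the stream was reset
+//		case result := <-doWork(r):
+//			fmt.Fprintln(w, result)
+//		}
+//	}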
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at http://httpwg.org/specs/rfc7231.html#status.codes).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if rws.wroteHeader {
+ return
+ }
+
+ checkWriteHeaderCode(code)
+
+ // Handle informational headers
+ if code >= 100 && code <= 199 {
+ // Per RFC 8297 we must not clear the current header map
+ h := rws.handlerHeader
+
+ _, cl := h["Content-Length"]
+ _, te := h["Transfer-Encoding"]
+ if cl || te {
+ h = h.Clone()
+ h.Del("Content-Length")
+ h.Del("Transfer-Encoding")
+ }
+
+ rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: code,
+ h: h,
+ endStream: rws.handlerDone && !rws.hasTrailers(),
+ })
+
+ return
+ }
+
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+}
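+
+// Because the 1xx branch above returns before wroteHeader is set, a handler
+// can call WriteHeader more than once to send informational responses such as
+// 103 Early Hints before the final status. An illustrative, non-normative sketch:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.Header().Add("Link", "</style.css>; rel=preload; as=style")
+//		w.WriteHeader(http.StatusEarlyHints) // 103; the header map is not cleared
+//		w.WriteHeader(http.StatusOK)         // snapshots headers for the real response
+//		io.WriteString(w, "hello")
+//	}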
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// either dataB or dataS is non-zero.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
+
+// Push errors.
+var (
+ ErrRecursivePush = errors.New("http2: recursive push not allowed")
+ ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+ st := w.rws.stream
+ sc := st.sc
+ sc.serveG.checkNotOn()
+
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
+ if st.isPushed() {
+ return ErrRecursivePush
+ }
+
+ if opts == nil {
+ opts = new(http.PushOptions)
+ }
+
+ // Default options.
+ if opts.Method == "" {
+ opts.Method = "GET"
+ }
+ if opts.Header == nil {
+ opts.Header = http.Header{}
+ }
+ wantScheme := "http"
+ if w.rws.req.TLS != nil {
+ wantScheme = "https"
+ }
+
+ // Validate the request.
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ if asciiEqualFold(k, "content-length") ||
+ asciiEqualFold(k, "content-encoding") ||
+ asciiEqualFold(k, "trailer") ||
+ asciiEqualFold(k, "te") ||
+ asciiEqualFold(k, "expect") ||
+ asciiEqualFold(k, "host") {
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := &startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: cloneHeader(opts.Header),
+ done: errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case sc.serveMsgCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case err := <-msg.done:
+ errChanPool.Put(msg.done)
+ return err
+ }
+}
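+
+// An illustrative, non-normative sketch of a handler using this Push support.
+// The type assertion fails when the connection is not HTTP/2; Push returns
+// http.ErrNotSupported when the peer has disabled push, so push errors are
+// typically safe to log and ignore:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		if pusher, ok := w.(http.Pusher); ok {
+//			if err := pusher.Push("/static/app.css", nil); err != nil {
+//				log.Printf("push failed: %v", err)
+//			}
+//		}
+//		io.WriteString(w, "<html>...</html>")
+//	}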
+
+type startPushRequest struct {
+ parent *stream
+ method string
+ url *url.URL
+ header http.Header
+ done chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- http.ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, http.ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
+ if sc.maxPushPromiseID+2 >= 1<<31 {
+ sc.startGracefulShutdownInternal()
+ return 0, ErrPushLimitReached
+ }
+ sc.maxPushPromiseID += 2
+ promisedID := sc.maxPushPromiseID
+
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+ Method: msg.method,
+ Scheme: msg.url.Scheme,
+ Authority: msg.url.Host,
+ Path: msg.url.RequestURI(),
+ Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ })
+ if err != nil {
+ // Should not happen, since we've already validated msg.url.
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+ }
+
+ sc.curHandlers++
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ return promisedID, nil
+ }
+
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePushPromise{
+ streamID: msg.parent.id,
+ method: msg.method,
+ url: msg.url,
+ h: msg.header,
+ allocatePromisedID: allocatePromisedID,
+ },
+ stream: msg.parent,
+ done: msg.done,
+ })
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 7230 section 7 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+ for _, k := range connHeaders {
+ if _, ok := h[k]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", k)
+ }
+ }
+ te := h["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
+
+func new400Handler(err error) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+}
+
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
+ var x interface{} = hs
+ type I interface {
+ doKeepAlives() bool
+ }
+ if hs, ok := x.(I); ok {
+ return !hs.doKeepAlives()
+ }
+ return false
+}
+
+func (sc *serverConn) countError(name string, err error) error {
+ if sc == nil || sc.srv == nil {
+ return err
+ }
+ f := sc.countErrorFunc
+ if f == nil {
+ return err
+ }
+ var typ string
+ var code ErrCode
+ switch e := err.(type) {
+ case ConnectionError:
+ typ = "conn"
+ code = ErrCode(e)
+ case StreamError:
+ typ = "stream"
+ code = ErrCode(e.Code)
+ default:
+ return err
+ }
+ codeStr := errCodeName[code]
+ if codeStr == "" {
+ codeStr = strconv.Itoa(int(code))
+ }
+ f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+ return err
+}
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
new file mode 100644
index 0000000..0b1c17b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/timer.go
@@ -0,0 +1,20 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import "time"
+
+// A timer is a time.Timer, as an interface which can be replaced in tests.
+type timer = interface {
+ C() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
+
+// timeTimer adapts a time.Timer to the timer interface.
+type timeTimer struct {
+ *time.Timer
+}
+
+func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..f26356b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,3287 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log"
+ "math"
+ "math/bits"
+ mathrand "math/rand"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+ "golang.org/x/net/internal/httpcommon"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ defaultUserAgent = "Go-http-client/2.0"
+
+ // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
+ // it has received the server's initial SETTINGS frame, which corresponds with the
+ // spec's minimum recommended value.
+ initialMaxConcurrentStreams = 100
+
+ // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
+ // if the server doesn't include one in its initial SETTINGS frame.
+ defaultMaxConcurrentStreams = 1000
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ // DialTLSContext specifies an optional dial function with context for
+ // creating TLS connections for requests.
+ //
+ // If DialTLSContext and DialTLS are nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLSContext and DialTLS are nil, tls.Dial is used.
+ //
+ // Deprecated: Use DialTLSContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialTLSContext takes priority.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the
+ // initial settings frame. It is the size in bytes of the largest frame
+ // payload that the sender is willing to receive. If 0, no setting is
+ // sent, and the value is provided by the peer, which should be 16384
+ // according to the spec:
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2.
+ // Values are bounded in the range 16k to 16M.
+ MaxReadFrameSize uint32
+
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
+ // StrictMaxConcurrentStreams controls whether the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+ // globally. If false, new TCP connections are created to the
+ // server as needed to keep each under the per-connection
+ // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+ // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+ // a global limit and callers of RoundTrip block when needed,
+ // waiting for their turn.
+ StrictMaxConcurrentStreams bool
+
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
+ // ReadIdleTimeout is the timeout after which a health check using ping
+ // frame will be carried out if no frame is received on the connection.
+ // Note that a ping response is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which the connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ WriteByteTimeout time.Duration
+
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+
+ *transportTestHooks
+}
+
+// Hook points used for testing.
+// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+type transportTestHooks struct {
+ newclientconn func(*ClientConn)
+ group synctestGroupInterface
+}
+
+func (t *Transport) markNewGoroutine() {
+ if t != nil && t.transportTestHooks != nil {
+ t.transportTestHooks.group.Join()
+ }
+}
+
+func (t *Transport) now() time.Time {
+ if t != nil && t.transportTestHooks != nil {
+ return t.transportTestHooks.group.Now()
+ }
+ return time.Now()
+}
+
+func (t *Transport) timeSince(when time.Time) time.Duration {
+ if t != nil && t.transportTestHooks != nil {
+ return t.now().Sub(when)
+ }
+ return time.Since(when)
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (t *Transport) newTimer(d time.Duration) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.NewTimer(d)
+ }
+ return timeTimer{time.NewTimer(d)}
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (t *Transport) afterFunc(d time.Duration, f func()) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.AfterFunc(d, f)
+ }
+ return timeTimer{time.AfterFunc(d, f)}
+}
+
+func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
+ }
+ return context.WithTimeout(ctx, d)
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ n := int64(t.MaxHeaderListSize)
+ if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+ n = t.t1.MaxResponseHeaderBytes
+ if n > 0 {
+ n = adjustHTTP1MaxHeaderSize(n)
+ }
+ }
+ if n <= 0 {
+ return 10 << 20
+ }
+ if n >= 0xffffffff {
+ return 0
+ }
+ return uint32(n)
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns an error if t1 has already been HTTP/2-enabled.
+//
+// Use ConfigureTransports instead to configure the HTTP/2 Transport.
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := ConfigureTransports(t1)
+ return err
+}
+
+// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns a new HTTP/2 Transport for further configuration.
+// It returns an error if t1 has already been HTTP/2-enabled.
+func ConfigureTransports(t1 *http.Transport) (*Transport, error) {
+ return configureTransports(t1)
+}
+
+func configureTransports(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper {
+ addr := authorityAddr(scheme, authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ if scheme == "http" {
+ return (*unencryptedTransport)(t2)
+ }
+ return t2
+ }
+ if t1.TLSNextProto == nil {
+ t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
+ }
+ t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper {
+ return upgradeFn("https", authority, c)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ }
+ return upgradeFn("http", authority, nc)
+ }
+ return t2, nil
+}
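+
+// Illustrative caller-side sketch (not part of this file's API surface; it only
+// uses the exported ConfigureTransports entry point and standard library types):
+//
+//	t1 := &http.Transport{}
+//	t2, err := http2.ConfigureTransports(t1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	t2.ReadIdleTimeout = 30 * time.Second // enable ping-based health checks
+//	client := &http.Client{Transport: t1} // t1 now negotiates HTTP/2 over TLS when offered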
+
+// unencryptedTransport is a Transport with a RoundTrip method that
+// always permits http:// URLs.
+type unencryptedTransport Transport
+
+func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true})
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ atomicReused uint32 // whether conn is being reused; atomic
+ singleUse bool // whether being used for a single http.Request
+ getConnCalled bool // used by clientConnPool
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ idleTimeout time.Duration // or 0 for never
+ idleTimer timer
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ closedOnIdle bool // true if conn was closed for idleness
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
+ // Settings from peer: (also guarded by wmu)
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+ initialStreamRecvWindowSize int32
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ extendedConnectAllowed bool
+
+ // rstStreamPingsBlocked works around an unfortunate gRPC behavior.
+ // gRPC strictly limits the number of PING frames that it will receive.
+ // The default is two pings per two hours, but the limit resets every time
+ // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575.
+ //
+ // rstStreamPingsBlocked is set after receiving a response to a PING frame
+ // bundled with an RST_STREAM (see pendingResets below), and cleared after
+ // receiving a HEADERS or DATA frame.
+ rstStreamPingsBlocked bool
+
+ // pendingResets is the number of RST_STREAM frames we have sent to the peer,
+ // without confirming that the peer has received them. When we send a RST_STREAM,
+ // we bundle it with a PING frame, unless a PING is already in flight. We count
+ // the reset stream against the connection's concurrency limit until we get
+ // a PING response. This limits the number of requests we'll try to send to a
+ // completely unresponsive connection.
+ pendingResets int
+
+ // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
+ // Write to reqHeaderMu to lock it, read from it to unlock.
+ // Lock reqHeaderMu BEFORE mu or wmu.
+ reqHeaderMu chan struct{}
+
+ // wmu is held while writing.
+ // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
+ // Only acquire both at the same time when changing peer settings.
+ wmu sync.Mutex
+ bw *bufio.Writer
+ fr *Framer
+ werr error // first write error that has occurred
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+
+ // Fields of Request that we may access even after the response body is closed.
+ ctx context.Context
+ reqCancel <-chan struct{}
+
+ trace *httptrace.ClientTrace // or nil
+ ID uint32
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+ isHead bool
+
+ abortOnce sync.Once
+ abort chan struct{} // closed to signal stream should end immediately
+ abortErr error // set if abort is closed
+
+ peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
+ donec chan struct{} // closed after the stream is in the closed state
+ on100 chan struct{} // buffered; written to if a 100 is received
+
+ respHeaderRecv chan struct{} // closed when headers are received
+ res *http.Response // set if respHeaderRecv is closed
+
+ flow outflow // guarded by cc.mu
+ inflow inflow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+
+ reqBody io.ReadCloser
+ reqBodyContentLength int64 // -1 means unknown
+ reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done
+
+ // owned by writeRequest:
+ sentEndStream bool // sent an END_STREAM flag to the peer
+ sentHeaders bool
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
+ totalHeaderSize int64 // total size of 1xx headers seen
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+var got1xxFuncForTests func(int, textproto.MIMEHeader) error
+
+// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
+// if any. It returns nil if not set or if the Go version is too old.
+func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
+ if fn := got1xxFuncForTests; fn != nil {
+ return fn
+ }
+ return traceGot1xxResponseFunc(cs.trace)
+}
+
+func (cs *clientStream) abortStream(err error) {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
+}
+
+func (cs *clientStream) abortStreamLocked(err error) {
+ cs.abortOnce.Do(func() {
+ cs.abortErr = err
+ close(cs.abort)
+ })
+ if cs.reqBody != nil {
+ cs.closeReqBodyLocked()
+ }
+ // TODO(dneil): Clean up tests where cs.cc.cond is nil.
+ if cs.cc.cond != nil {
+ // Wake up writeRequestBody if it is waiting on flow control.
+ cs.cc.cond.Broadcast()
+ }
+}
+
+func (cs *clientStream) abortRequestBodyWrite() {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ cs.closeReqBodyLocked()
+ cc.cond.Broadcast()
+ }
+}
+
+func (cs *clientStream) closeReqBodyLocked() {
+ if cs.reqBodyClosed != nil {
+ return
+ }
+ cs.reqBodyClosed = make(chan struct{})
+ reqBodyClosed := cs.reqBodyClosed
+ go func() {
+ cs.cc.t.markNewGoroutine()
+ cs.reqBody.Close()
+ close(reqBodyClosed)
+ }()
+}
+
+type stickyErrWriter struct {
+ group synctestGroupInterface
+ conn net.Conn
+ timeout time.Duration
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ *sew.err = err
+ return n, err
+}
+
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+ // from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type noCachedConnError struct{}
+
+func (noCachedConnError) IsHTTP2NoCachedConnError() {}
+func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
+
+var ErrNoCachedConn error = noCachedConnError{}
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+
+ allowHTTP bool // allow http:// URLs
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+ // authorityAddr takes a given authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ host = authority
+ port = ""
+ }
+ if port == "" { // authority's port was empty
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
+}
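+
+// For illustration, the mapping performed above (derived from the code, not normative):
+//
+//	authorityAddr("https", "example.com")      -> "example.com:443"
+//	authorityAddr("http", "example.com")       -> "example.com:80"
+//	authorityAddr("https", "example.com:8443") -> "example.com:8443"
+//	authorityAddr("https", "[::1]")            -> "[::1]:443"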
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ switch req.URL.Scheme {
+ case "https":
+ // Always okay.
+ case "http":
+ if !t.AllowHTTP && !opt.allowHTTP {
+ return nil, errors.New("http2: unencrypted HTTP/2 not enabled")
+ }
+ default:
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+ for retry := 0; ; retry++ {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1)
+ traceGotConn(req, cc, reused)
+ res, err := cc.RoundTrip(req)
+ if err != nil && retry <= 6 {
+ roundTripErr := err
+ if req, err = shouldRetryRequest(req, err); err == nil {
+ // After the first retry, do exponential backoff with 10% jitter.
+ if retry == 0 {
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
+ continue
+ }
+ backoff := float64(uint(1) << (uint(retry) - 1))
+ backoff += backoff * (0.1 * mathrand.Float64())
+ d := time.Second * time.Duration(backoff)
+ tm := t.newTimer(d)
+ select {
+ case <-tm.C():
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
+ continue
+ case <-req.Context().Done():
+ tm.Stop()
+ err = req.Context().Err()
+ }
+ }
+ }
+ if err == errClientConnNotEstablished {
+ // This ClientConn was created recently,
+ // this is the first request to use it,
+ // and the connection is closed and not usable.
+ //
+ // In this state, cc.idleTimer will remove the conn from the pool
+ // when it fires. Stop the timer and remove it here so future requests
+ // won't try to use this connection.
+ //
+ // If the timer has already fired and we're racing it, the redundant
+ // call to MarkDead is harmless.
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ t.connPool().MarkDead(cc)
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
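+
+// For reference, the backoff above (before the 10% jitter) works out to an
+// immediate first retry, then roughly 1s, 2s, 4s, 8s, 16s and 32s for retries
+// 1 through 6; this note describes the code above rather than a guarantee.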
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnNotEstablished = errors.New("http2: client conn could not be established")
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
+ if !canRetryError(err) {
+ return nil, err
+ }
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || req.Body == http.NoBody {
+ return req, nil
+ }
+
+ // If the request body can be reset back to its original
+ // state via the optional req.GetBody, do that.
+ if req.GetBody != nil {
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+ }
+
+ // The Request.Body can't reset back to the beginning, but we
+ // don't seem to have started to read from it yet, so reuse
+ // the request directly.
+ if err == errClientConnUnusable {
+ return req, nil
+ }
+
+ return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
+}
+
+func canRetryError(err error) bool {
+ if err == errClientConnUnusable || err == errClientConnGotGoAway {
+ return true
+ }
+ if se, ok := err.(StreamError); ok {
+ if se.Code == ErrCodeProtocol && se.Cause == errFromPeer {
+ // See golang/go#47635, golang/go#42777
+ return true
+ }
+ return se.Code == ErrCodeRefusedStream
+ }
+ return false
+}
+
+func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
+ if t.transportTestHooks != nil {
+ return t.newClientConn(nil, singleUse)
+ }
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.newClientConn(tconn, singleUse)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig.Clone()
+ }
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) {
+ if t.DialTLSContext != nil {
+ return t.DialTLSContext(ctx, network, addr, tlsCfg)
+ } else if t.DialTLS != nil {
+ return t.DialTLS(network, addr, tlsCfg)
+ }
+
+ tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg)
+ if err != nil {
+ return nil, err
+ }
+ state := tlsCn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return tlsCn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return t.t1.ExpectContinueTimeout
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ return t.newClientConn(c, t.disableKeepAlives())
+}
+
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ conf := configFromTransport(t)
+ cc := &ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ seenSettingsChan: make(chan struct{}),
+ wantSettingsAck: true,
+ readIdleTimeout: conf.SendPingTimeout,
+ pingTimeout: conf.PingTimeout,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ lastActive: t.now(),
+ }
+ var group synctestGroupInterface
+ if t.transportTestHooks != nil {
+ t.markNewGoroutine()
+ t.transportTestHooks.newclientconn(cc)
+ c = cc.tconn
+ group = t.group
+ }
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+ }
+
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(stickyErrWriter{
+ group: group,
+ conn: c,
+ timeout: conf.WriteByteTimeout,
+ err: &cc.werr,
+ })
+ cc.br = bufio.NewReader(c)
+ cc.fr = NewFramer(cc.bw, cc.br)
+ cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
+ if t.CountError != nil {
+ cc.fr.countError = t.CountError
+ }
+ maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+ cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
+ cc.peerMaxHeaderTableSize = initialHeaderTableSize
+
+ if cs, ok := c.(connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []Setting{
+ {ID: SettingEnablePush, Val: 0},
+ {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
+ }
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+ if maxHeaderTableSize != initialHeaderTableSize {
+ initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize})
+ }
+
+ cc.bw.Write(clientPreface)
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+ cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ cc.Close()
+ return nil, cc.werr
+ }
+
+ // Start the idle timer after the connection is fully initialized.
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ }
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) healthCheck() {
+ pingTimeout := cc.pingTimeout
+ // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
+ // trigger the healthCheck again if there is no frame received.
+ ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ defer cancel()
+ cc.vlogf("http2: Transport sending health check")
+ err := cc.Ping(ctx)
+ if err != nil {
+ cc.vlogf("http2: Transport health check failure: %v", err)
+ cc.closeForLostPing()
+ } else {
+ cc.vlogf("http2: Transport health check success")
+ }
+}
+
+// SetDoNotReuse marks cc as not reusable for future HTTP requests.
+func (cc *ClientConn) SetDoNotReuse() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.doNotReuse = true
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ old := cc.goAway
+ cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID <= last {
+ // The server's GOAWAY indicates that it received this stream.
+ // It will either finish processing it, or close the connection
+ // without doing so. Either way, leave the stream alone for now.
+ continue
+ }
+ if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
+ // Don't retry the first stream on a connection if we get a non-NO error.
+ // If the server is sending an error on a new connection,
+ // retrying the request on a new one probably isn't going to work.
+ cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
+ } else {
+ // Aborting the stream with errClientConnGotGoAway indicates that
+ // the request should be retried on a new connection.
+ cs.abortStreamLocked(errClientConnGotGoAway)
+ }
+ }
+}
+
+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed or received or sent a GOAWAY.
+//
+// If the caller is going to immediately make a new request on this
+// connection, use ReserveNewRequest instead.
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+// ReserveNewRequest is like CanTakeNewRequest but also reserves a
+// concurrent stream in cc. The reservation is decremented on the
+// next call to RoundTrip.
+func (cc *ClientConn) ReserveNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if st := cc.idleStateLocked(); !st.canTakeNewRequest {
+ return false
+ }
+ cc.streamsReserved++
+ return true
+}
+
+// ClientConnState describes the state of a ClientConn.
+type ClientConnState struct {
+ // Closed is whether the connection is closed.
+ Closed bool
+
+ // Closing is whether the connection is in the process of
+ // closing. It may be closing due to shutdown, being a
+ // single-use connection, being marked as DoNotReuse, or
+ // having received a GOAWAY frame.
+ Closing bool
+
+ // StreamsActive is how many streams are active.
+ StreamsActive int
+
+ // StreamsReserved is how many streams have been reserved via
+ // ClientConn.ReserveNewRequest.
+ StreamsReserved int
+
+ // StreamsPending is how many requests have been sent in excess
+ // of the peer's advertised MaxConcurrentStreams setting and
+ // are waiting for other streams to complete.
+ StreamsPending int
+
+ // MaxConcurrentStreams is how many concurrent streams the
+ // peer advertised as acceptable. Zero means no SETTINGS
+ // frame has been received yet.
+ MaxConcurrentStreams uint32
+
+ // LastIdle, if non-zero, is when the connection last
+ // transitioned to idle state.
+ LastIdle time.Time
+}
+
+// State returns a snapshot of cc's state.
+func (cc *ClientConn) State() ClientConnState {
+ cc.wmu.Lock()
+ maxConcurrent := cc.maxConcurrentStreams
+ if !cc.seenSettings {
+ maxConcurrent = 0
+ }
+ cc.wmu.Unlock()
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return ClientConnState{
+ Closed: cc.closed,
+ Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
+ StreamsActive: len(cc.streams) + cc.pendingResets,
+ StreamsReserved: cc.streamsReserved,
+ StreamsPending: cc.pendingRequests,
+ LastIdle: cc.lastIdle,
+ MaxConcurrentStreams: maxConcurrent,
+ }
+}
+
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type clientConnIdleState struct {
+ canTakeNewRequest bool
+}
+
+func (cc *ClientConn) idleState() clientConnIdleState {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.idleStateLocked()
+}
+
+func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return
+ }
+ var maxConcurrentOkay bool
+ if cc.t.StrictMaxConcurrentStreams {
+ // We'll tell the caller we can take a new request to
+ // prevent the caller from dialing a new TCP
+ // connection, but then we'll block later before
+ // writing it.
+ maxConcurrentOkay = true
+ } else {
+ // We can take a new request if the total of
+ // - active streams;
+ // - reservation slots for new streams; and
+ // - streams for which we have sent a RST_STREAM and a PING,
+ // but received no subsequent frame
+ // is less than the concurrency limit.
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
+ }
+
+ st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+ !cc.doNotReuse &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
+
+ // If this connection has never been used for a request and is closed,
+ // then let it take a request (which will fail).
+ // If the conn was closed for idleness, we're racing the idle timer;
+ // don't try to use the conn. (Issue #70515.)
+ //
+ // This avoids a situation where an error early in a connection's lifetime
+ // goes unreported.
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
+ st.canTakeNewRequest = true
+ }
+
+ return
+}
+
+// currentRequestCountLocked reports the number of concurrency slots currently in use,
+// including active streams, reserved slots, and reset streams waiting for acknowledgement.
+func (cc *ClientConn) currentRequestCountLocked() int {
+ return len(cc.streams) + cc.streamsReserved + cc.pendingResets
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ st := cc.idleStateLocked()
+ return st.canTakeNewRequest
+}
+
+ // tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+ // The Round(0) strips the monotonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+ cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeConn() {
+ t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
+ defer t.Stop()
+ cc.tconn.Close()
+}
+
+// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
+// Try to shut it down more aggressively.
+func (cc *ClientConn) forceCloseConn() {
+ tc, ok := cc.tconn.(*tls.Conn)
+ if !ok {
+ return
+ }
+ if nc := tc.NetConn(); nc != nil {
+ nc.Close()
+ }
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 || cc.streamsReserved > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ cc.closedOnIdle = true
+ nextID := cc.nextStreamID
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
+ cc.closeConn()
+}
+
+func (cc *ClientConn) isDoNotReuseAndIdle() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.doNotReuse && len(cc.streams) == 0
+}
+
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+ if err := cc.sendGoAway(); err != nil {
+ return err
+ }
+ // Wait for all in-flight streams to complete or connection to close
+ done := make(chan struct{})
+ cancelled := false // guarded by cc.mu
+ go func() {
+ cc.t.markNewGoroutine()
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if len(cc.streams) == 0 || cc.closed {
+ cc.closed = true
+ close(done)
+ break
+ }
+ if cancelled {
+ break
+ }
+ cc.cond.Wait()
+ }
+ }()
+ shutdownEnterWaitStateHook()
+ select {
+ case <-done:
+ cc.closeConn()
+ return nil
+ case <-ctx.Done():
+ cc.mu.Lock()
+ // Free the goroutine above
+ cancelled = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ return ctx.Err()
+ }
+}
+
+func (cc *ClientConn) sendGoAway() error {
+ cc.mu.Lock()
+ closing := cc.closing
+ cc.closing = true
+ maxStreamID := cc.nextStreamID
+ cc.mu.Unlock()
+ if closing {
+ // GOAWAY sent already
+ return nil
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ // Send a graceful shutdown frame to server
+ if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ return err
+ }
+ // Prevent new requests
+ return nil
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+// err is sent to streams.
+func (cc *ClientConn) closeForError(err error) {
+ cc.mu.Lock()
+ cc.closed = true
+ for _, cs := range cc.streams {
+ cs.abortStreamLocked(err)
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ cc.closeConn()
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ cc.closeForError(err)
+ return nil
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+func (cc *ClientConn) closeForLostPing() {
+ err := errors.New("http2: client connection lost")
+ if f := cc.t.CountError; f != nil {
+ f("conn_close_lost_ping")
+ }
+ cc.closeForError(err)
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+ // exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need. Request.Cancel this is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+ if req.Body == nil || req.Body == http.NoBody {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
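+
+// For illustration of the rules above: a nil or http.NoBody body yields 0, a
+// body with a non-zero req.ContentLength yields that length, and a non-nil
+// body with ContentLength 0 yields -1 (unknown).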
+
+func (cc *ClientConn) decrStreamReservations() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.decrStreamReservationsLocked()
+}
+
+func (cc *ClientConn) decrStreamReservationsLocked() {
+ if cc.streamsReserved > 0 {
+ cc.streamsReserved--
+ }
+}
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ return cc.roundTrip(req, nil)
+}
+
+func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) {
+ ctx := req.Context()
+ cs := &clientStream{
+ cc: cc,
+ ctx: ctx,
+ reqCancel: req.Cancel,
+ isHead: req.Method == "HEAD",
+ reqBody: req.Body,
+ reqBodyContentLength: actualContentLength(req),
+ trace: httptrace.ContextClientTrace(ctx),
+ peerClosed: make(chan struct{}),
+ abort: make(chan struct{}),
+ respHeaderRecv: make(chan struct{}),
+ donec: make(chan struct{}),
+ }
+
+ cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
+
+ go cs.doRequest(req, streamf)
+
+ waitDone := func() error {
+ select {
+ case <-cs.donec:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
+ }
+
+ handleResponseHeaders := func() (*http.Response, error) {
+ res := cs.res
+ if res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite()
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ if res.Body == noBody && actualContentLength(req) == 0 {
+ // If there isn't a request or response body still being
+ // written, then wait for the stream to be closed before
+ // RoundTrip returns.
+ if err := waitDone(); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+ }
+
+ cancelRequest := func(cs *clientStream, err error) error {
+ cs.cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cs.cc.mu.Unlock()
+ // Wait for the request body to be closed.
+ //
+ // If nothing closed the body before now, abortStreamLocked
+ // will have started a goroutine to close it.
+ //
+ // Closing the body before returning avoids a race condition
+ // with net/http checking its readTrackingBody to see if the
+ // body was read from or closed. See golang/go#60041.
+ //
+ // The body is closed in a separate goroutine without the
+ // connection mutex held, but dropping the mutex before waiting
+ // will keep us from holding it indefinitely if the body
+ // close is slow for some reason.
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
+ return err
+ }
+
+ for {
+ select {
+ case <-cs.respHeaderRecv:
+ return handleResponseHeaders()
+ case <-cs.abort:
+ select {
+ case <-cs.respHeaderRecv:
+ // If both cs.respHeaderRecv and cs.abort are signaling,
+ // pick respHeaderRecv. The server probably wrote the
+ // response and immediately reset the stream.
+ // golang.org/issue/49645
+ return handleResponseHeaders()
+ default:
+ waitDone()
+ return nil, cs.abortErr
+ }
+ case <-ctx.Done():
+ err := ctx.Err()
+ cs.abortStream(err)
+ return nil, cancelRequest(cs, err)
+ case <-cs.reqCancel:
+ cs.abortStream(errRequestCanceled)
+ return nil, cancelRequest(cs, errRequestCanceled)
+ }
+ }
+}
+
+// doRequest runs for the duration of the request lifetime.
+//
+// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
+func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
+ cs.cc.t.markNewGoroutine()
+ err := cs.writeRequest(req, streamf)
+ cs.cleanupWriteRequest(err)
+}
+
+var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer")
+
+// writeRequest sends a request.
+//
+// It returns nil after the request is written, the response read,
+// and the request stream is half-closed by the peer.
+//
+// It returns non-nil if the request ends otherwise.
+// If the returned error is StreamError, the error Code may be used in resetting the stream.
+func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ // Wait for the peer's SETTINGS frame to be received; a server can change this value
+ // later, but we just wait for the first SETTINGS frame.
+ var isExtendedConnect bool
+ if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" {
+ isExtendedConnect = true
+ }
+
+ // Acquire the new-request lock by writing to reqHeaderMu.
+ // This lock guards the critical section covering allocating a new stream ID
+ // (requires mu) and creating the stream (requires wmu).
+ if cc.reqHeaderMu == nil {
+ panic("RoundTrip on uninitialized ClientConn") // for tests
+ }
+ if isExtendedConnect {
+ select {
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.seenSettingsChan:
+ if !cc.extendedConnectAllowed {
+ return errExtendedConnectNotSupported
+ }
+ }
+ }
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ cc.mu.Lock()
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ cc.decrStreamReservationsLocked()
+ if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
+ cc.mu.Unlock()
+ <-cc.reqHeaderMu
+ return err
+ }
+ cc.addStreamLocked(cs) // assigns stream ID
+ if isConnectionCloseRequest(req) {
+ cc.doNotReuse = true
+ }
+ cc.mu.Unlock()
+
+ if streamf != nil {
+ streamf(cs)
+ }
+
+ continueTimeout := cc.t.expectContinueTimeout()
+ if continueTimeout != 0 {
+ if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
+ continueTimeout = 0
+ } else {
+ cs.on100 = make(chan struct{}, 1)
+ }
+ }
+
+ // Past this point (where we send request headers), it is possible for
+ // RoundTrip to return successfully. Since the RoundTrip contract permits
+ // the caller to "mutate or reuse" the Request after closing the Response's Body,
+ // we must take care when referencing the Request from here on.
+ err = cs.encodeAndWriteHeaders(req)
+ <-cc.reqHeaderMu
+ if err != nil {
+ return err
+ }
+
+ hasBody := cs.reqBodyContentLength != 0
+ if !hasBody {
+ cs.sentEndStream = true
+ } else {
+ if continueTimeout != 0 {
+ traceWait100Continue(cs.trace)
+ timer := time.NewTimer(continueTimeout)
+ select {
+ case <-timer.C:
+ err = nil
+ case <-cs.on100:
+ err = nil
+ case <-cs.abort:
+ err = cs.abortErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-cs.reqCancel:
+ err = errRequestCanceled
+ }
+ timer.Stop()
+ if err != nil {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
+ }
+
+ if err = cs.writeRequestBody(req); err != nil {
+ if err != errStopReqBodyWrite {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
+ } else {
+ cs.sentEndStream = true
+ }
+ }
+
+ traceWroteRequest(cs.trace, err)
+
+ var respHeaderTimer <-chan time.Time
+ var respHeaderRecv chan struct{}
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := cc.t.newTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C()
+ respHeaderRecv = cs.respHeaderRecv
+ }
+ // Wait until the peer half-closes its end of the stream,
+ // or until the request is aborted (via context, error, or otherwise),
+ // whichever comes first.
+ for {
+ select {
+ case <-cs.peerClosed:
+ return nil
+ case <-respHeaderTimer:
+ return errTimeout
+ case <-respHeaderRecv:
+ respHeaderRecv = nil
+ respHeaderTimer = nil // keep waiting for END_STREAM
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
+ }
+}
+
+func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // If the request was canceled while waiting for cc.mu, just quit.
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ default:
+ }
+
+ // Encode headers.
+ //
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ cc.hbuf.Reset()
+ res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
+ cc.writeHeader(name, value)
+ })
+ if err != nil {
+ return fmt.Errorf("http2: %w", err)
+ }
+ hdrs := cc.hbuf.Bytes()
+
+ // Write the request.
+ endStream := !res.HasBody && !res.HasTrailers
+ cs.sentHeaders = true
+ err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
+ traceWroteHeaders(cs.trace)
+ return err
+}
+
+func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
+ return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
+ Request: httpcommon.Request{
+ Header: req.Header,
+ Trailer: req.Trailer,
+ URL: req.URL,
+ Host: req.Host,
+ Method: req.Method,
+ ActualContentLength: actualContentLength(req),
+ },
+ AddGzipHeader: addGzipHeader,
+ PeerMaxHeaderListSize: peerMaxHeaderListSize,
+ DefaultUserAgent: defaultUserAgent,
+ }, headerf)
+}
+
+// cleanupWriteRequest performs post-request tasks.
+//
+// If err (the result of writeRequest) is non-nil and the stream is not closed,
+// cleanupWriteRequest will send a reset to the peer.
+func (cs *clientStream) cleanupWriteRequest(err error) {
+ cc := cs.cc
+
+ if cs.ID == 0 {
+ // We were canceled before creating the stream, so return our reservation.
+ cc.decrStreamReservations()
+ }
+
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cc.mu.Lock()
+ mustCloseBody := false
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ mustCloseBody = true
+ cs.reqBodyClosed = make(chan struct{})
+ }
+ bodyClosed := cs.reqBodyClosed
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+ cc.mu.Unlock()
+ if mustCloseBody {
+ cs.reqBody.Close()
+ close(bodyClosed)
+ }
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
+
+ if err != nil && cs.sentEndStream {
+ // If the connection is closed immediately after the response is read,
+ // we may be aborted before finishing up here. If the stream was closed
+ // cleanly on both sides, there is no error.
+ select {
+ case <-cs.peerClosed:
+ err = nil
+ default:
+ }
+ }
+ if err != nil {
+ cs.abortStream(err) // possibly redundant, but harmless
+ if cs.sentHeaders {
+ if se, ok := err.(StreamError); ok {
+ if se.Cause != errFromPeer {
+ cc.writeStreamReset(cs.ID, se.Code, false, err)
+ }
+ } else {
+ // We're cancelling an in-flight request.
+ //
+ // This could be due to the server becoming unresponsive.
+ // To avoid sending too many requests on a dead connection,
+ // we let the request continue to consume a concurrency slot
+ // until we can confirm the server is still responding.
+ // We do this by sending a PING frame along with the RST_STREAM
+ // (unless a ping is already in flight).
+ //
+ // For simplicity, we don't bother tracking the PING payload:
+ // We reset cc.pendingResets any time we receive a PING ACK.
+ //
+ // We skip this if the conn is going to be closed on idle,
+ // because it's short lived and will probably be closed before
+ // we get the ping response.
+ ping := false
+ if !closeOnIdle {
+ cc.mu.Lock()
+ // rstStreamPingsBlocked works around a gRPC behavior:
+ // see comment on the field for details.
+ if !cc.rstStreamPingsBlocked {
+ if cc.pendingResets == 0 {
+ ping = true
+ }
+ cc.pendingResets++
+ }
+ cc.mu.Unlock()
+ }
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
+ }
+ }
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
+ } else {
+ if cs.sentHeaders && !cs.sentEndStream {
+ cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil)
+ }
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ }
+ if cs.ID != 0 {
+ cc.forgetStreamID(cs.ID)
+ }
+
+ cc.wmu.Lock()
+ werr := cc.werr
+ cc.wmu.Unlock()
+ if werr != nil {
+ cc.Close()
+ }
+
+ close(cs.donec)
+}
+
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
+// Must hold cc.mu.
+func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
+ for {
+ if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 {
+ // This is the very first request sent to this connection.
+ // Return a fatal error which aborts the retry loop.
+ return errClientConnNotEstablished
+ }
+ cc.lastActive = cc.t.now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ return errClientConnUnusable
+ }
+ cc.lastIdle = time.Time{}
+ if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) {
+ return nil
+ }
+ cc.pendingRequests++
+ cc.cond.Wait()
+ cc.pendingRequests--
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ default:
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > maxFrameSize {
+ chunk = chunk[:maxFrameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but send a stream reset of CANCEL.
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+ errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
+)
+
+// frameScratchBufferLen returns the length of a buffer to use for
+// outgoing request bodies to read/write to/from.
+//
+// It returns max(1, min(peer's advertised max frame size,
+// Request.ContentLength+1, 512KB)).
+func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
+ const max = 512 << 10
+ n := int64(maxFrameSize)
+ if n > max {
+ n = max
+ }
+ if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
+ // Add an extra byte past the declared content-length to
+ // give the caller's Request.Body io.Reader a chance to
+ // give us more bytes than they declared, so we can catch it
+ // early.
+ n = cl + 1
+ }
+ if n < 1 {
+ return 1
+ }
+ return int(n) // doesn't truncate; max is 512K
+}
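+
+// Illustrative worked examples of the sizing rule above (not part of the
+// upstream code; values assume the 512 KiB cap in frameScratchBufferLen):
+//
+//	maxFrameSize=16384,  reqBodyContentLength=100 -> 101      (content length + 1 wins)
+//	maxFrameSize=16384,  reqBodyContentLength=-1  -> 16384    (unknown length, frame size wins)
+//	maxFrameSize=1<<20,  reqBodyContentLength=-1  -> 512<<10  (capped at 512 KiB)
+//	maxFrameSize=16384,  reqBodyContentLength=0   -> 1        (floor of one byte)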
+
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// (0 KB, 16 KB], (16 KB, 32 KB], (32 KB, 64 KB], (64 KB, 128 KB], (128 KB, 256 KB],
+// (256 KB, 512 KB], (512 KB, infinity)
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+ if size <= 16384 {
+ return 0
+ }
+ size -= 1
+ bits := bits.Len(uint(size))
+ index := bits - 14
+ if index >= len(bufPools) {
+ return len(bufPools) - 1
+ }
+ return index
+}
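+
+// Illustrative mapping of buffer sizes to pool indexes (a worked example of
+// the arithmetic above, not part of the upstream code):
+//
+//	bufPoolIndex(16 << 10)        == 0 // (0 KB, 16 KB]
+//	bufPoolIndex((16 << 10) + 1)  == 1 // (16 KB, 32 KB]
+//	bufPoolIndex(64 << 10)        == 2 // (32 KB, 64 KB]
+//	bufPoolIndex(512 << 10)       == 5 // (256 KB, 512 KB]
+//	bufPoolIndex((512 << 10) + 1) == 6 // the "infinity" pool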
+
+func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
+ cc := cs.cc
+ body := cs.reqBody
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+
+ hasTrailers := req.Trailer != nil
+ remainLen := cs.reqBodyContentLength
+ hasContentLen := remainLen != -1
+
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
+ // Scratch buffer for reading into & writing from.
+ scratchLen := cs.frameScratchBufferLen(maxFrameSize)
+ var buf []byte
+ index := bufPoolIndex(scratchLen)
+ if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer bufPools[index].Put(bp)
+ buf = *bp
+ } else {
+ buf = make([]byte, scratchLen)
+ defer bufPools[index].Put(&buf)
+ }
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if hasContentLen {
+ remainLen -= int64(n)
+ if remainLen == 0 && err == nil {
+ // The request body's Content-Length was predeclared and
+ // we just finished reading it all, but the underlying io.Reader
+ // returned the final chunk with a nil error (which is one of
+ // the two valid things a Reader can do at EOF). Because we'd prefer
+ // to send the END_STREAM bit early, double-check that we're actually
+ // at EOF. Subsequent reads should return (0, EOF) at this point.
+ // If either value is different, we return an error in one of two ways below.
+ var scratch [1]byte
+ var n1 int
+ n1, err = body.Read(scratch[:])
+ remainLen -= int64(n1)
+ }
+ if remainLen < 0 {
+ err = errReqBodyTooLong
+ return err
+ }
+ }
+ if err != nil {
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed != nil
+ cc.mu.Unlock()
+ switch {
+ case bodyClosed:
+ return errStopReqBodyWrite
+ case err == io.EOF:
+ sawEOF = true
+ err = nil
+ default:
+ return err
+ }
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ if err != nil {
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+ // opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
+ }
+
+ // Since the RoundTrip contract permits the caller to "mutate or reuse"
+ // a request after the Response's Body is closed, verify that this hasn't
+ // happened before accessing the trailers.
+ cc.mu.Lock()
+ trailer := req.Trailer
+ err = cs.abortErr
+ cc.mu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ var trls []byte
+ if len(trailer) > 0 {
+ trls, err = cc.encodeTrailers(trailer)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, errClientConnClosed
+ }
+ if cs.reqBodyClosed != nil {
+ return 0, errStopReqBodyWrite
+ }
+ select {
+ case <-cs.abort:
+ return 0, cs.abortErr
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-cs.reqCancel:
+ return 0, errRequestCanceled
+ default:
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+ if int(take) > maxBytes {
+ take = int32(maxBytes) // can't truncate int; take is int32
+ }
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+// requires cc.wmu be held.
+func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ hlSize := uint64(0)
+ for k, vv := range trailer {
+ for _, v := range vv {
+ hf := hpack.HeaderField{Name: k, Value: v}
+ hlSize += uint64(hf.Size())
+ }
+ }
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, errRequestHeaderListSize
+ }
+
+ for k, vv := range trailer {
+ lowKey, ascii := httpcommon.LowerHeader(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+ // Transfer-Encoding, etc. have already been filtered at the
+ // start of RoundTrip
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes(), nil
+}
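+
+// Illustrative caller-side sketch (not upstream code; the trailer name is
+// hypothetical): trailer keys must be declared on Request.Trailer before
+// RoundTrip, and their values filled in before the request body returns
+// io.EOF, so they are available here once all DATA frames have been written.
+//
+//	req, _ := http.NewRequest("POST", url, body)
+//	req.Trailer = http.Header{"X-Checksum": nil} // declare the key up front
+//	// ... while the body is being consumed, compute the value, then:
+//	req.Trailer.Set("X-Checksum", sum)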
+
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ _ incomparable
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) addStreamLocked(cs *clientStream) {
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.init(cc.initialStreamRecvWindowSize)
+ cs.ID = cc.nextStreamID
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ if cs.ID == 0 {
+ panic("assigned stream ID 0")
+ }
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.mu.Lock()
+ slen := len(cc.streams)
+ delete(cc.streams, id)
+ if len(cc.streams) != slen-1 {
+ panic("forgetting unknown stream id")
+ }
+ cc.lastActive = cc.t.now()
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = cc.t.now()
+ }
+ // Wake up writeRequestBody via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
+
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+ if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
+ }
+ cc.closed = true
+ defer cc.closeConn()
+ }
+
+ cc.mu.Unlock()
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+ _ incomparable
+ cc *ClientConn
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+ cc.t.markNewGoroutine()
+ rl := &clientConnReadLoop{cc: cc}
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type GoAwayError struct {
+ LastStreamID uint32
+ ErrCode ErrCode
+ DebugData string
+}
+
+func (e GoAwayError) Error() string {
+ return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+ e.LastStreamID, e.ErrCode, e.DebugData)
+}
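+
+// Illustrative sketch (not upstream code): a caller can distinguish a server
+// shutdown from other transport failures by checking for GoAwayError. This
+// assumes the error reaches the caller unwrapped or wrapped with %w.
+//
+//	resp, err := client.Do(req)
+//	var ga http2.GoAwayError
+//	if errors.As(err, &ga) {
+//		log.Printf("server going away: last stream %d, code %v", ga.LastStreamID, ga.ErrCode)
+//	}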
+
+func isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ defer cc.closeConn()
+ defer close(cc.readerDone)
+
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ cc.mu.Lock()
+ if cc.goAway != nil && isEOFOrNetReadError(err) {
+ err = GoAwayError{
+ LastStreamID: cc.goAway.LastStreamID,
+ ErrCode: cc.goAway.ErrCode,
+ DebugData: cc.goAwayDebug,
+ }
+ } else if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.closed = true
+
+ // If the connection has never been used, and has been open for only a short time,
+ // leave it in the connection pool for a little while.
+ //
+ // This avoids a situation where new connections are constantly created,
+ // added to the pool, fail, and are removed from the pool, without any error
+ // being surfaced to the user.
+ unusedWaitTime := 5 * time.Second
+ if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
+ unusedWaitTime = cc.idleTimeout
+ }
+ idleTime := cc.t.now().Sub(cc.lastActive)
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
+ cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.t.connPool().MarkDead(cc)
+ })
+ } else {
+ cc.mu.Unlock() // avoid any deadlocks in MarkDead
+ cc.t.connPool().MarkDead(cc)
+ cc.mu.Lock()
+ }
+
+ for _, cs := range cc.streams {
+ select {
+ case <-cs.peerClosed:
+ // The server closed the stream before closing the conn,
+ // so no need to interrupt it.
+ default:
+ cs.abortStreamLocked(err)
+ }
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+
+ if !cc.seenSettings {
+ // If we have a pending request that wants extended CONNECT,
+ // let it continue and fail with the connection error.
+ cc.extendedConnectAllowed = true
+ close(cc.seenSettingsChan)
+ }
+}
+
+// countReadFrameError calls Transport.CountError with a string
+// representing err.
+func (cc *ClientConn) countReadFrameError(err error) {
+ f := cc.t.CountError
+ if f == nil || err == nil {
+ return
+ }
+ if ce, ok := err.(ConnectionError); ok {
+ errCode := ErrCode(ce)
+ f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
+ return
+ }
+ if errors.Is(err, io.EOF) {
+ f("read_frame_eof")
+ return
+ }
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ f("read_frame_unexpected_eof")
+ return
+ }
+ if errors.Is(err, ErrFrameTooLarge) {
+ f("read_frame_too_large")
+ return
+ }
+ f("read_frame_other")
+}
+
+func (rl *clientConnReadLoop) run() error {
+ cc := rl.cc
+ gotSettings := false
+ readIdleTimeout := cc.readIdleTimeout
+ var t timer
+ if readIdleTimeout != 0 {
+ t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ }
+ for {
+ f, err := cc.fr.ReadFrame()
+ if t != nil {
+ t.Reset(readIdleTimeout)
+ }
+ if err != nil {
+ cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+ }
+ if se, ok := err.(StreamError); ok {
+ if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil {
+ if se.Cause == nil {
+ se.Cause = cc.fr.errDetail
+ }
+ rl.endStreamError(cs, se)
+ }
+ continue
+ } else if err != nil {
+ cc.countReadFrameError(err)
+ return err
+ }
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ if !gotSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ cc.logf("protocol error: received %T before a SETTINGS frame", f)
+ return ConnectionError(ErrCodeProtocol)
+ }
+ gotSettings = true
+ }
+
+ switch f := f.(type) {
+ case *MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ case *DataFrame:
+ err = rl.processData(f)
+ case *GoAwayFrame:
+ err = rl.processGoAway(f)
+ case *RSTStreamFrame:
+ err = rl.processResetStream(f)
+ case *SettingsFrame:
+ err = rl.processSettings(f)
+ case *PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
+ }
+ return err
+ }
+ }
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if cs.readClosed {
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: errors.New("protocol error: headers after END_STREAM"),
+ })
+ return nil
+ }
+ if !cs.firstByte {
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ traceFirstResponseByte(cs.trace)
+ }
+ cs.firstByte = true
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: err,
+ })
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ cs.resTrailer = &res.Trailer
+ cs.res = res
+ close(cs.respHeaderRecv)
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+// handleResponse may return a nil error or a ConnectionError. Any other error
+// value is treated by the caller as a StreamError of type ErrCodeProtocol, with
+// the returned error as the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 1xx responses).
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+ if f.Truncated {
+ return nil, errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("malformed response from server: missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
+ }
+
+ regularFields := f.RegularFields()
+ strs := make([]string, len(regularFields))
+ header := make(http.Header, len(regularFields))
+ res := &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + http.StatusText(statusCode),
+ }
+ for _, hf := range regularFields {
+ key := httpcommon.CanonicalHeader(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(http.Header)
+ res.Trailer = t
+ }
+ foreachHeaderElement(hf.Value, func(v string) {
+ t[httpcommon.CanonicalHeader(v)] = nil
+ })
+ } else {
+ vv := header[key]
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+ // Set the capacity on strs[0] to 1, so any future append
+ // won't extend the slice into the other strings.
+ vv, strs = strs[:1:1], strs[1:]
+ vv[0] = hf.Value
+ header[key] = vv
+ } else {
+ header[key] = append(vv, hf.Value)
+ }
+ }
+ }
+
+ if statusCode >= 100 && statusCode <= 199 {
+ if f.StreamEnded() {
+ return nil, errors.New("1xx informational response with END_STREAM flag")
+ }
+ if fn := cs.get1xxTraceFunc(); fn != nil {
+ // If the 1xx response is being delivered to the user,
+ // then they're responsible for limiting the number
+ // of responses.
+ if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
+ return nil, err
+ }
+ } else {
+ // If the user didn't examine the 1xx response, then we
+ // limit the size of all 1xx headers.
+ //
+ // This differs a bit from the HTTP/1 implementation, which
+ // limits the size of all 1xx headers plus the final response.
+ // Use the larger limit of MaxHeaderListSize and
+ // net/http.Transport.MaxResponseHeaderBytes.
+ limit := int64(cs.cc.t.maxHeaderListSize())
+ if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit {
+ limit = t1.MaxResponseHeaderBytes
+ }
+ for _, h := range f.Fields {
+ cs.totalHeaderSize += int64(h.Size())
+ }
+ if cs.totalHeaderSize > limit {
+ if VerboseLogs {
+ log.Printf("http2: 1xx informational responses too large")
+ }
+ return nil, errors.New("header list too large")
+ }
+ }
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ select {
+ case cs.on100 <- struct{}{}:
+ default:
+ }
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+ res.ContentLength = int64(cl)
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ } else if f.StreamEnded() && !cs.isHead {
+ res.ContentLength = 0
+ }
+
+ if cs.isHead {
+ res.Body = noBody
+ return res, nil
+ }
+
+ if f.StreamEnded() {
+ if res.ContentLength > 0 {
+ res.Body = missingBody{}
+ } else {
+ res.Body = noBody
+ }
+ return res, nil
+ }
+
+ cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength})
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+
+ if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ res.Uncompressed = true
+ }
+ return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any HEADERS frame carrying trailers
+ // also has END_STREAM set.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ trailer := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ key := httpcommon.CanonicalHeader(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser.
+type transportResponseBody struct {
+ cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cs.abortStream(err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ connAdd := cc.inflow.add(n)
+ var streamAdd int32
+ if err == nil { // No need to refresh if the stream is over or failed.
+ streamAdd = cs.inflow.add(n)
+ }
+ cc.mu.Unlock()
+
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+ cs := b.cs
+ cc := cs.cc
+
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ cs.abortStream(errClosedResponseBody)
+
+ unread := cs.bufPipe.Len()
+ if unread > 0 {
+ cc.mu.Lock()
+ // Return connection-level flow control.
+ connAdd := cc.inflow.add(unread)
+ cc.mu.Unlock()
+
+ // TODO(dneil): Acquiring this mutex can block indefinitely.
+ // Move flow control return to a goroutine?
+ cc.wmu.Lock()
+ // Return connection-level flow control.
+ if connAdd > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ select {
+ case <-cs.donec:
+ case <-cs.ctx.Done():
+ // See golang/go#49366: The net/http package can cancel the
+ // request context after the response body is fully read.
+ // Don't treat this as an error.
+ return nil
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
+ return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
+ data := f.Data()
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+
+ // But at least return their flow control:
+ if f.Length > 0 {
+ cc.mu.Lock()
+ ok := cc.inflow.take(f.Length)
+ connAdd := cc.inflow.add(int(f.Length))
+ cc.mu.Unlock()
+ if !ok {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ if connAdd > 0 {
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+ }
+ return nil
+ }
+ if cs.readClosed {
+ cc.logf("protocol error: received DATA after END_STREAM")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
+ if !cs.pastHeaders {
+ cc.logf("protocol error: received DATA before a HEADERS frame")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
+ if f.Length > 0 {
+ if cs.isHead && len(data) > 0 {
+ cc.logf("protocol error: received DATA on a HEAD request")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if !takeInflows(&cc.inflow, &cs.inflow, f.Length) {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ var refund int
+ if pad := int(f.Length) - len(data); pad > 0 {
+ refund += pad
+ }
+
+ didReset := false
+ var err error
+ if len(data) > 0 {
+ if _, err = cs.bufPipe.Write(data); err != nil {
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset = true
+ refund += len(data)
+ }
+ }
+
+ sendConn := cc.inflow.add(refund)
+ var sendStream int32
+ if !didReset {
+ sendStream = cs.inflow.add(refund)
+ }
+ cc.mu.Unlock()
+
+ if sendConn > 0 || sendStream > 0 {
+ cc.wmu.Lock()
+ if sendConn > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+ }
+ if sendStream > 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ if err != nil {
+ rl.endStreamError(cs, err)
+ return nil
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ if !cs.readClosed {
+ cs.readClosed = true
+ // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
+ // race condition: The caller can read io.EOF from Response.Body
+ // and close the body before we close cs.peerClosed, causing
+ // cleanupWriteRequest to send a RST_STREAM.
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
+ close(cs.peerClosed)
+ }
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+ cs.readAborted = true
+ cs.abortStream(err)
+}
+
+// Constants passed to streamByID for documentation purposes.
+const (
+ headerOrDataFrame = true
+ notHeaderOrDataFrame = false
+)
+
+// streamByID returns the stream with the given id, or nil if no stream has that id.
+// If headerOrData is true, it clears cc.rstStreamPingsBlocked.
+func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream {
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ if headerOrData {
+ // Work around an unfortunate gRPC behavior.
+ // See comment on ClientConn.rstStreamPingsBlocked for details.
+ rl.cc.rstStreamPingsBlocked = false
+ }
+ cs := rl.cc.streams[id]
+ if cs != nil && !cs.readAborted {
+ return cs
+ }
+ return nil
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ if fn := cc.t.CountError; fn != nil {
+ fn("recv_goaway_" + f.ErrCode.stringToken())
+ }
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+ cc := rl.cc
+ // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
+ // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ if err := rl.processSettingsNoWrite(f); err != nil {
+ return err
+ }
+ if !f.IsAck() {
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ }
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ if f.IsAck() {
+ if cc.wantSettingsAck {
+ cc.wantSettingsAck = false
+ return nil
+ }
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ var seenMaxConcurrentStreams bool
+ err := f.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ seenMaxConcurrentStreams = true
+ case SettingMaxHeaderListSize:
+ cc.peerMaxHeaderListSize = uint64(s.Val)
+ case SettingInitialWindowSize:
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ if s.Val > math.MaxInt32 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+
+ // Adjust flow control of currently-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ delta := int32(s.Val) - int32(cc.initialWindowSize)
+ for _, cs := range cc.streams {
+ cs.flow.add(delta)
+ }
+ cc.cond.Broadcast()
+
+ cc.initialWindowSize = s.Val
+ case SettingHeaderTableSize:
+ cc.henc.SetMaxDynamicTableSize(s.Val)
+ cc.peerMaxHeaderTableSize = s.Val
+ case SettingEnableConnectProtocol:
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL,
+ // we require that it do so in the first SETTINGS frame.
+ //
+ // When we attempt to use extended CONNECT, we wait for the first
+ // SETTINGS frame to see if the server supports it. If we let the
+ // server enable the feature with a later SETTINGS frame, then
+ // users will see inconsistent results depending on whether we've
+ // seen that frame or not.
+ if !cc.seenSettings {
+ cc.extendedConnectAllowed = s.Val == 1
+ }
+ default:
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !cc.seenSettings {
+ if !seenMaxConcurrentStreams {
+ // This was the server's initial SETTINGS frame and it
+ // didn't contain a MAX_CONCURRENT_STREAMS field, so
+ // increase the number of concurrent streams this
+ // connection can establish to our default.
+ cc.maxConcurrentStreams = defaultMaxConcurrentStreams
+ }
+ close(cc.seenSettingsChan)
+ cc.seenSettings = true
+ }
+
+ return nil
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
+ if cs != nil {
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeFlowControl,
+ })
+ return nil
+ }
+
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ serr := streamError(cs.ID, f.ErrCode)
+ serr.Cause = errFromPeer
+ if f.ErrCode == ErrCodeProtocol {
+ rl.cc.SetDoNotReuse()
+ }
+ if fn := cs.cc.t.CountError; fn != nil {
+ fn("recv_rststream_" + f.ErrCode.stringToken())
+ }
+ cs.abortStream(serr)
+
+ cs.bufPipe.CloseWithError(serr)
+ return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ var pingError error
+ errc := make(chan struct{})
+ go func() {
+ cc.t.markNewGoroutine()
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if pingError = cc.fr.WritePing(false, p); pingError != nil {
+ close(errc)
+ return
+ }
+ if pingError = cc.bw.Flush(); pingError != nil {
+ close(errc)
+ return
+ }
+ }()
+ select {
+ case <-c:
+ return nil
+ case <-errc:
+ return pingError
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
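+
+// Illustrative sketch (not upstream code): Ping can serve as an explicit
+// health check on a connection obtained from a ClientConnPool or from
+// Transport.NewClientConn; a bounded context keeps a dead peer from
+// blocking the caller indefinitely.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := cc.Ping(ctx); err != nil {
+//		// Treat the connection as unhealthy and stop routing requests to it.
+//	}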
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
+ if cc.pendingResets > 0 {
+ // See clientStream.cleanupWriteRequest.
+ cc.pendingResets = 0
+ cc.rstStreamPingsBlocked = true
+ cc.cond.Broadcast()
+ }
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+// writeStreamReset sends a RST_STREAM frame.
+// When ping is true, it also sends a PING frame with a random payload.
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ if ping {
+ var payload [8]byte
+ rand.Read(payload[:])
+ cc.fr.WritePing(false, payload)
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
+)
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = noBodyReader{}
+
+type noBodyReader struct{}
+
+func (noBodyReader) Close() error { return nil }
+func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF }
+
+type missingBody struct{}
+
+func (missingBody) Close() error { return nil }
+func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ _ incomparable
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ if err := gz.body.Close(); err != nil {
+ return err
+ }
+ gz.zerr = fs.ErrClosed
+ return nil
+}
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+ return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.Transport.RoundTrip(req)
+ if isNoCachedConnError(err) {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
+
+func (t *Transport) idleConnTimeout() time.Duration {
+ // To keep things backwards compatible, we use a non-zero IdleConnTimeout if
+ // set, then the IdleConnTimeout of the underlying http1 transport, and
+ // finally 0.
+ if t.IdleConnTimeout != 0 {
+ return t.IdleConnTimeout
+ }
+
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+
+ return 0
+}
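+
+// Illustrative sketch (not upstream code): either field feeds the fallback
+// chain above.
+//
+//	t2 := &http2.Transport{IdleConnTimeout: 90 * time.Second} // set directly, or
+//	t1 := &http.Transport{IdleConnTimeout: 90 * time.Second}
+//	_ = http2.ConfigureTransport(t1) // inherited from the wrapped http1 transport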
+
+func traceGetConn(req *http.Request, hostPort string) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GetConn == nil {
+ return
+ }
+ trace.GetConn(hostPort)
+}
+
+func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
+ cc.mu.Lock()
+ ci.WasIdle = len(cc.streams) == 0 && reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
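+
+// Illustrative sketch (not upstream code): the trace hooks above fire for
+// requests that carry an httptrace.ClientTrace in their context.
+//
+//	trace := &httptrace.ClientTrace{
+//		GetConn: func(hostPort string) { log.Println("getting conn for", hostPort) },
+//		GotConn: func(ci httptrace.GotConnInfo) { log.Println("got conn, reused:", ci.Reused) },
+//	}
+//	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))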
+
+func traceWroteHeaders(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+ dialer := &tls.Dialer{
+ Config: cfg,
+ }
+ cn, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+ return tlsCn, nil
+}
diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go
new file mode 100644
index 0000000..b2de211
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/unencrypted.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+)
+
+const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
+
+// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
+//
+// TLSNextProto functions accept a *tls.Conn.
+//
+// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
+// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
+// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
+// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
+// that returns the actual connection we want to use.
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
+ conner, ok := tc.NetConn().(interface {
+ UnencryptedNetConn() net.Conn
+ })
+ if !ok {
+ return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
+ }
+ return conner.UnencryptedNetConn(), nil
+}
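+
+// Illustrative sketch (not upstream code) of a wrapper that satisfies the
+// handoff contract described above: tls.Conn.NetConn returns whatever
+// net.Conn the *tls.Conn was built around, so the side handing off an
+// unencrypted connection wraps it first. rawConn and cfg are assumed.
+//
+//	type unencryptedConn struct{ net.Conn }
+//
+//	func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }
+//
+//	tc := tls.Client(unencryptedConn{rawConn}, cfg)
+//	// unencryptedNetConnFromTLSConn(tc) now returns rawConn.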
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..fdb35b9
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,381 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+
+ // staysWithinBuffer reports whether this writer promises that
+ // it will only write less than or equal to size bytes, and it
+ // won't Flush the write context.
+ staysWithinBuffer(size int) bool
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func writeEndsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("writeEndsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
+type writeSettings []Setting
+
+func (s writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+ return frameHeaderLen+settingSize*len(s) <= max
+}
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ return err
+}
+
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+func (w *writeData) staysWithinBuffer(max int) bool {
+ return frameHeaderLen+len(w.p) <= max
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+type writePing struct {
+ data [8]byte
+}
+
+func (w writePing) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(false, w.data)
+}
+
+func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
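+
+// Illustrative worked example (not upstream code): a 40,000-byte header block
+// split with the fixed 16 KB fragment size above produces three calls to fn:
+//
+//	fragment 1: 16384 bytes, firstFrag=true,  lastFrag=false  (HEADERS)
+//	fragment 2: 16384 bytes, firstFrag=false, lastFrag=false  (CONTINUATION)
+//	fragment 3:  7232 bytes, firstFrag=false, lastFrag=true   (CONTINUATION, END_HEADERS)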
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h http.Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ encKV(enc, ":method", w.method)
+ encKV(enc, ":scheme", w.url.Scheme)
+ encKV(enc, ":authority", w.url.Host)
+ encKV(enc, ":path", w.url.RequestURI())
+ encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method is only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k, ascii := httpcommon.LowerHeader(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..cc893ad
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority PriorityParam)
+
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
+ Push(wr FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd, except RST_STREAM frames. No frames should be
+ // discarded except by CloseStream.
+ Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
+ // write is the interface value that does the writing, once the
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
+ write writeFramer
+
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
+ stream *stream
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
+ if se, ok := wr.write.(StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
+ }
+ return 0
+ }
+ return wr.stream.id
+}
+
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr FrameWriteRequest) isControl() bool {
+ return wr.stream == nil
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*writeData); ok {
+ return len(wd.p)
+ }
+ return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+ var empty FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
+ }
+ rest := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
+ }
+ return consumed, rest, 2
+ }
+
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
+}
+
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
+ }
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
+ return
+ }
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+ }
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
+type writeQueue struct {
+ s []FrameWriteRequest
+ prev, next *writeQueue
+}
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wr FrameWriteRequest) {
+ q.s = append(q.s, wr)
+}
+
+func (q *writeQueue) shift() FrameWriteRequest {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wr := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = FrameWriteRequest{}
+ q.s = q.s[:len(q.s)-1]
+ return wr
+}
+
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
+ if len(q.s) == 0 {
+ return FrameWriteRequest{}, false
+ }
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+ for i := range q.s {
+ q.s[i] = FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 0000000..f678333
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,451 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &priorityWriteScheduler{
+ nodes: make(map[uint32]*priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
+
+type priorityNodeState int
+
+const (
+ priorityNodeOpen priorityNodeState = iota
+ priorityNodeClosed
+ priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *priorityNode
+ kids *priorityNode // start of the kids list
+ prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
+
+type priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // The stream may be currently idle but cannot be opened or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+ var n *priorityNode
+ if wr.isControl() {
+ n = &ws.root
+ } else {
+ id := wr.StreamID()
+ n = ws.nodes[id]
+ if n == nil {
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. In either case, we push wr onto the root, rather
+ // than creating a new priorityNode.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+ for n.kids != nil {
+ n.kids.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..f2e55e0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,77 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+ return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+ // zero are frames not associated with a specific stream.
+ zero writeQueue
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // When a stream is idle, closed, or emptied, it's deleted
+ // from the map.
+ sq map[uint32]*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // no-op: idle streams are not tracked
+}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ // no-op: priorities are ignored
+}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.zero.push(wr)
+ return
+ }
+ id := wr.StreamID()
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
+func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ // Iterate over all non-idle streams until finding one that can be consumed.
+ for streamID, q := range ws.sq {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if q.empty() {
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+ }
+ return wr, true
+ }
+ }
+ return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
new file mode 100644
index 0000000..54fe863
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -0,0 +1,119 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type roundRobinWriteScheduler struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // streams maps stream ID to a queue.
+ streams map[uint32]*writeQueue
+
+ // stream queues are stored in a circular linked list.
+ // head is the next stream to write, or nil if there are no streams open.
+ head *writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+// newRoundRobinWriteScheduler constructs a new write scheduler.
+// The round robin scheduler prioritizes control frames
+// like SETTINGS and PING over DATA frames.
+// When there are no control frames to send, it performs a round-robin
+// selection from the ready streams.
+func newRoundRobinWriteScheduler() WriteScheduler {
+ ws := &roundRobinWriteScheduler{
+ streams: make(map[uint32]*writeQueue),
+ }
+ return ws
+}
+
+func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ if ws.streams[streamID] != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = q
+ if ws.head == nil {
+ ws.head = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.head.prev
+ q.next = ws.head
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
+ q := ws.streams[streamID]
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.head = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.head == q {
+ ws.head = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
+
+func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()]
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+ if ws.head == nil {
+ return FrameWriteRequest{}, false
+ }
+ q := ws.head
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ ws.head = q.next
+ return wr, true
+ }
+ q = q.next
+ if q == ws.head {
+ break
+ }
+ }
+ return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go
new file mode 100644
index 0000000..712f1ad
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/go118.go
@@ -0,0 +1,13 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+
+package idna
+
+// Transitional processing is disabled by default in Go 1.18.
+// https://golang.org/issue/47510
+const transitionalLookup = false
diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go
new file mode 100644
index 0000000..7b37178
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -0,0 +1,769 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/secure/bidirule"
+ "golang.org/x/text/unicode/bidi"
+ "golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+ return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+ return Punycode.process(s, false)
+}
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by some browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+ return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+//
+// This option corresponds to the VerifyDnsLength flag in UTS #46.
+func VerifyDNSLength(verify bool) Option {
+ return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+func RemoveLeadingDots(remove bool) Option {
+ return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
+// in UTS #46.
+func ValidateLabels(enable bool) Option {
+ return func(o *options) {
+ // Don't override existing mappings, but set one that at least checks
+ // normalization if it is not set.
+ if o.mapping == nil && enable {
+ o.mapping = normalize
+ }
+ o.trie = trie
+ o.checkJoiners = enable
+ o.checkHyphens = enable
+ if enable {
+ o.fromPuny = validateFromPunycode
+ } else {
+ o.fromPuny = nil
+ }
+ }
+}
+
+// CheckHyphens sets whether to check for correct use of hyphens ('-') in
+// labels. Most web browsers do not have this option set, since labels such as
+// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
+//
+// This option corresponds to the CheckHyphens flag in UTS #46.
+func CheckHyphens(enable bool) Option {
+ return func(o *options) { o.checkHyphens = enable }
+}
+
+// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
+// A of RFC 5892, concerning the use of joiner runes.
+//
+// This option corresponds to the CheckJoiners flag in UTS #46.
+func CheckJoiners(enable bool) Option {
+ return func(o *options) {
+ o.trie = trie
+ o.checkJoiners = enable
+ }
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
+// but is only useful if ValidateLabels is set.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details.
+//
+// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
+func StrictDomainName(use bool) Option {
+ return func(o *options) { o.useSTD3Rules = use }
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+//
+// This option corresponds to the CheckBidi flag in UTS #46.
+func BidiRule() Option {
+ return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+ return func(o *options) {
+ o.mapping = validateRegistration
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ VerifyDNSLength(true)(o)
+ BidiRule()(o)
+ }
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+ return func(o *options) {
+ o.mapping = validateAndMap
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ }
+}
+
+type options struct {
+ transitional bool
+ useSTD3Rules bool
+ checkHyphens bool
+ checkJoiners bool
+ verifyDNSLength bool
+ removeLeadingDots bool
+
+ trie *idnaTrie
+
+ // fromPuny calls validation rules when converting A-labels to U-labels.
+ fromPuny func(p *Profile, s string) error
+
+ // mapping implements a validation and mapping step as defined in RFC 5895
+ // or UTS 46, tailored to, for example, domain registration or lookup.
+ mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
+
+ // bidirule, if specified, checks whether s conforms to the Bidi Rule
+ // defined in RFC 5893.
+ bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+ options
+}
+
+func apply(o *options, opts []Option) {
+ for _, f := range opts {
+ f(o)
+ }
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+ p := &Profile{}
+ apply(&p.options, o)
+ return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+ return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+ pp := *p
+ pp.transitional = false
+ return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+ s := ""
+ if p.transitional {
+ s = "Transitional"
+ } else {
+ s = "NonTransitional"
+ }
+ if p.useSTD3Rules {
+ s += ":UseSTD3Rules"
+ }
+ if p.checkHyphens {
+ s += ":CheckHyphens"
+ }
+ if p.checkJoiners {
+ s += ":CheckJoiners"
+ }
+ if p.verifyDNSLength {
+ s += ":VerifyDNSLength"
+ }
+ return s
+}
+
+var (
+ // Punycode is a Profile that does raw punycode processing with a minimum
+ // of validation.
+ Punycode *Profile = punycode
+
+ // Lookup is the recommended profile for looking up domain names, according
+ // to Section 5 of RFC 5891. The exact configuration of this profile may
+ // change over time.
+ Lookup *Profile = lookup
+
+ // Display is the recommended profile for displaying domain names.
+ // The configuration of this profile may change over time.
+ Display *Profile = display
+
+ // Registration is the recommended profile for checking whether a given
+ // IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration
+
+ punycode = &Profile{}
+ lookup = &Profile{options{
+ transitional: transitionalLookup,
+ useSTD3Rules: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ display = &Profile{options{
+ useSTD3Rules: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ registration = &Profile{options{
+ useSTD3Rules: true,
+ verifyDNSLength: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateRegistration,
+ bidirule: bidirule.ValidString,
+ }}
+
+ // TODO: profiles
+ // Register: recommended for approving domain names: don't do any mappings
+ // but rather reject on invalid input. Bundle or block deviation characters.
+)
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+ return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+ return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see https://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+ var err error
+ var isBidi bool
+ if p.mapping != nil {
+ s, isBidi, err = p.mapping(p, s)
+ }
+ // Remove leading empty labels.
+ if p.removeLeadingDots {
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+ }
+ }
+ // TODO: allow for a quick check of the tables data.
+ // It seems like we should only create this error on ToASCII, but the
+ // UTS 46 conformance tests suggest we should always check this.
+ if err == nil && p.verifyDNSLength && s == "" {
+ err = &labelError{s, "A4"}
+ }
+ labels := labelIter{orig: s}
+ for ; !labels.done(); labels.next() {
+ label := labels.label()
+ if label == "" {
+ // Empty labels are not okay. The label iterator skips the last
+ // label if it is empty.
+ if err == nil && p.verifyDNSLength {
+ err = &labelError{s, "A4"}
+ }
+ continue
+ }
+ if strings.HasPrefix(label, acePrefix) {
+ u, err2 := decode(label[len(acePrefix):])
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ // Spec says keep the old label.
+ continue
+ }
+ isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
+ labels.set(u)
+ if err == nil && p.fromPuny != nil {
+ err = p.fromPuny(p, u)
+ }
+ if err == nil {
+ // This should be called on NonTransitional, according to the
+ // spec, but that currently does not have any effect. Use the
+ // original profile to preserve options.
+ err = p.validateLabel(u)
+ }
+ } else if err == nil {
+ err = p.validateLabel(label)
+ }
+ }
+ if isBidi && p.bidirule != nil && err == nil {
+ for labels.reset(); !labels.done(); labels.next() {
+ if !p.bidirule(labels.label()) {
+ err = &labelError{s, "B"}
+ break
+ }
+ }
+ }
+ if toASCII {
+ for labels.reset(); !labels.done(); labels.next() {
+ label := labels.label()
+ if !ascii(label) {
+ a, err2 := encode(acePrefix, label)
+ if err == nil {
+ err = err2
+ }
+ label = a
+ labels.set(a)
+ }
+ n := len(label)
+ if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+ err = &labelError{label, "A4"}
+ }
+ }
+ }
+ s = labels.result()
+ if toASCII && p.verifyDNSLength && err == nil {
+ // Compute the length of the domain name minus the root label and its dot.
+ n := len(s)
+ if n > 0 && s[n-1] == '.' {
+ n--
+ }
+ if len(s) < 1 || n > 253 {
+ err = &labelError{s, "A4"}
+ }
+ }
+ return s, err
+}
+
+func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
+ // TODO: consider first doing a quick check to see if any of these checks
+ // need to be done. This will make it slower in the general case, but
+ // faster in the common case.
+ mapped = norm.NFC.String(s)
+ isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
+ return mapped, isBidi, nil
+}
+
+func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
+ // TODO: filter need for normalization in loop below.
+ if !norm.NFC.IsNormalString(s) {
+ return s, false, &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ return s, bidi, runeError(utf8.RuneError)
+ }
+ bidi = bidi || info(v).isBidi(s[i:])
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ // TODO: handle the NV8 defined in the Unicode idna data set to allow
+ // for strict conformance to IDNA2008.
+ case valid, deviation:
+ case disallowed, mapped, unknown, ignored:
+ r, _ := utf8.DecodeRuneInString(s[i:])
+ return s, bidi, runeError(r)
+ }
+ i += sz
+ }
+ return s, bidi, nil
+}
+
+func (c info) isBidi(s string) bool {
+ if !c.isMapped() {
+ return c&attributesMask == rtl
+ }
+ // TODO: also store bidi info for mapped data. This is possible, but a bit
+ // cumbersome and not for the common case.
+ p, _ := bidi.LookupString(s)
+ switch p.Class() {
+ case bidi.R, bidi.AL, bidi.AN:
+ return true
+ }
+ return false
+}
+
+func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
+ var (
+ b []byte
+ k int
+ )
+ // combinedInfoBits contains the or-ed bits of all runes. We use this
+ // to derive the mayNeedNorm bit later. This may trigger normalization
+ // overeagerly, but it will not do so in the common case. The end result
+ // is another 10% saving on BenchmarkProfile for the common case.
+ var combinedInfoBits info
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ b = append(b, s[k:i]...)
+ b = append(b, "\ufffd"...)
+ k = len(s)
+ if err == nil {
+ err = runeError(utf8.RuneError)
+ }
+ break
+ }
+ combinedInfoBits |= info(v)
+ bidi = bidi || info(v).isBidi(s[i:])
+ start := i
+ i += sz
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ case valid:
+ continue
+ case disallowed:
+ if err == nil {
+ r, _ := utf8.DecodeRuneInString(s[start:])
+ err = runeError(r)
+ }
+ continue
+ case mapped, deviation:
+ b = append(b, s[k:start]...)
+ b = info(v).appendMapping(b, s[start:i])
+ case ignored:
+ b = append(b, s[k:start]...)
+ // drop the rune
+ case unknown:
+ b = append(b, s[k:start]...)
+ b = append(b, "\ufffd"...)
+ }
+ k = i
+ }
+ if k == 0 {
+ // No changes so far.
+ if combinedInfoBits&mayNeedNorm != 0 {
+ s = norm.NFC.String(s)
+ }
+ } else {
+ b = append(b, s[k:]...)
+ if norm.NFC.QuickSpan(b) != len(b) {
+ b = norm.NFC.Bytes(b)
+ }
+ // TODO: the punycode converters require strings as input.
+ s = string(b)
+ }
+ return s, bidi, err
+}
+
+// A labelIter allows iterating over domain name labels.
+type labelIter struct {
+ orig string
+ slice []string
+ curStart int
+ curEnd int
+ i int
+}
+
+func (l *labelIter) reset() {
+ l.curStart = 0
+ l.curEnd = 0
+ l.i = 0
+}
+
+func (l *labelIter) done() bool {
+ return l.curStart >= len(l.orig)
+}
+
+func (l *labelIter) result() string {
+ if l.slice != nil {
+ return strings.Join(l.slice, ".")
+ }
+ return l.orig
+}
+
+func (l *labelIter) label() string {
+ if l.slice != nil {
+ return l.slice[l.i]
+ }
+ p := strings.IndexByte(l.orig[l.curStart:], '.')
+ l.curEnd = l.curStart + p
+ if p == -1 {
+ l.curEnd = len(l.orig)
+ }
+ return l.orig[l.curStart:l.curEnd]
+}
+
+// next sets the value to the next label. It skips the last label if it is empty.
+func (l *labelIter) next() {
+ l.i++
+ if l.slice != nil {
+ if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
+ l.curStart = len(l.orig)
+ }
+ } else {
+ l.curStart = l.curEnd + 1
+ if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
+ l.curStart = len(l.orig)
+ }
+ }
+}
+
+func (l *labelIter) set(s string) {
+ if l.slice == nil {
+ l.slice = strings.Split(l.orig, ".")
+ }
+ l.slice[l.i] = s
+}
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+ switch cat {
+ case disallowedSTD3Mapped:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = mapped
+ }
+ case disallowedSTD3Valid:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = valid
+ }
+ case deviation:
+ if !p.transitional {
+ cat = valid
+ }
+ case validNV8, validXV8:
+ // TODO: handle V2008
+ cat = valid
+ }
+ return cat
+}
+
+func validateFromPunycode(p *Profile, s string) error {
+ if !norm.NFC.IsNormalString(s) {
+ return &labelError{s, "V1"}
+ }
+ // TODO: detect whether string may have to be normalized in the following
+ // loop.
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ return runeError(utf8.RuneError)
+ }
+ if c := p.simplify(info(v).category()); c != valid && c != deviation {
+ return &labelError{s, "V6"}
+ }
+ i += sz
+ }
+ return nil
+}
+
+const (
+ zwnj = "\u200c"
+ zwj = "\u200d"
+)
+
+type joinState int8
+
+const (
+ stateStart joinState = iota
+ stateVirama
+ stateBefore
+ stateBeforeVirama
+ stateAfter
+ stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+ stateStart: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateVirama,
+ },
+ stateVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ },
+ stateBefore: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ joinZWNJ: stateAfter,
+ joinZWJ: stateFAIL,
+ joinVirama: stateBeforeVirama,
+ },
+ stateBeforeVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ },
+ stateAfter: {
+ joiningL: stateFAIL,
+ joiningD: stateBefore,
+ joiningT: stateAfter,
+ joiningR: stateStart,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateAfter, // no-op as we can't accept joiners here
+ },
+ stateFAIL: {
+ 0: stateFAIL,
+ joiningL: stateFAIL,
+ joiningD: stateFAIL,
+ joiningT: stateFAIL,
+ joiningR: stateFAIL,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateFAIL,
+ },
+}
+
+// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
+// already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) (err error) {
+ if s == "" {
+ if p.verifyDNSLength {
+ return &labelError{s, "A4"}
+ }
+ return nil
+ }
+ if p.checkHyphens {
+ if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+ return &labelError{s, "V2"}
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return &labelError{s, "V3"}
+ }
+ }
+ if !p.checkJoiners {
+ return nil
+ }
+ trie := p.trie // p.checkJoiners is only set if trie is set.
+ // TODO: merge the use of this in the trie.
+ v, sz := trie.lookupString(s)
+ x := info(v)
+ if x.isModifier() {
+ return &labelError{s, "V5"}
+ }
+ // Quickly return in the absence of zero-width (non) joiners.
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
+ return nil
+ }
+ st := stateStart
+ for i := 0; ; {
+ jt := x.joinType()
+ if s[i:i+sz] == zwj {
+ jt = joinZWJ
+ } else if s[i:i+sz] == zwnj {
+ jt = joinZWNJ
+ }
+ st = joinStates[st][jt]
+ if x.isViramaModifier() {
+ st = joinStates[st][joinVirama]
+ }
+ if i += sz; i == len(s) {
+ break
+ }
+ v, sz = trie.lookupString(s[i:])
+ x = info(v)
+ }
+ if st == stateFAIL || st == stateAfter {
+ return &labelError{s, "C"}
+ }
+ return nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go
new file mode 100644
index 0000000..cc6a892
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -0,0 +1,717 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/secure/bidirule"
+ "golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+ return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+ return Punycode.process(s, false)
+}
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by some browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+ return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+//
+// This option corresponds to the VerifyDnsLength flag in UTS #46.
+func VerifyDNSLength(verify bool) Option {
+ return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+func RemoveLeadingDots(remove bool) Option {
+ return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
+// in UTS #46.
+func ValidateLabels(enable bool) Option {
+ return func(o *options) {
+ // Don't override existing mappings, but set one that at least checks
+ // normalization if it is not set.
+ if o.mapping == nil && enable {
+ o.mapping = normalize
+ }
+ o.trie = trie
+ o.checkJoiners = enable
+ o.checkHyphens = enable
+ if enable {
+ o.fromPuny = validateFromPunycode
+ } else {
+ o.fromPuny = nil
+ }
+ }
+}
+
+// CheckHyphens sets whether to check for correct use of hyphens ('-') in
+// labels. Most web browsers do not have this option set, since labels such as
+// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
+//
+// This option corresponds to the CheckHyphens flag in UTS #46.
+func CheckHyphens(enable bool) Option {
+ return func(o *options) { o.checkHyphens = enable }
+}
+
+// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
+// A of RFC 5892, concerning the use of joiner runes.
+//
+// This option corresponds to the CheckJoiners flag in UTS #46.
+func CheckJoiners(enable bool) Option {
+ return func(o *options) {
+ o.trie = trie
+ o.checkJoiners = enable
+ }
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
+// but is only useful if ValidateLabels is set.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details.
+//
+// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
+func StrictDomainName(use bool) Option {
+ return func(o *options) { o.useSTD3Rules = use }
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+//
+// This option corresponds to the CheckBidi flag in UTS #46.
+func BidiRule() Option {
+ return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+ return func(o *options) {
+ o.mapping = validateRegistration
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ VerifyDNSLength(true)(o)
+ BidiRule()(o)
+ }
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+ return func(o *options) {
+ o.mapping = validateAndMap
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ RemoveLeadingDots(true)(o)
+ }
+}
+
+type options struct {
+ transitional bool
+ useSTD3Rules bool
+ checkHyphens bool
+ checkJoiners bool
+ verifyDNSLength bool
+ removeLeadingDots bool
+
+ trie *idnaTrie
+
+ // fromPuny calls validation rules when converting A-labels to U-labels.
+ fromPuny func(p *Profile, s string) error
+
+ // mapping implements a validation and mapping step as defined in RFC 5895
+ // or UTS 46, tailored to, for example, domain registration or lookup.
+ mapping func(p *Profile, s string) (string, error)
+
+ // bidirule, if specified, checks whether s conforms to the Bidi Rule
+ // defined in RFC 5893.
+ bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+ options
+}
+
+func apply(o *options, opts []Option) {
+ for _, f := range opts {
+ f(o)
+ }
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+ p := &Profile{}
+ apply(&p.options, o)
+ return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+ return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+ pp := *p
+ pp.transitional = false
+ return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+ s := ""
+ if p.transitional {
+ s = "Transitional"
+ } else {
+ s = "NonTransitional"
+ }
+ if p.useSTD3Rules {
+ s += ":UseSTD3Rules"
+ }
+ if p.checkHyphens {
+ s += ":CheckHyphens"
+ }
+ if p.checkJoiners {
+ s += ":CheckJoiners"
+ }
+ if p.verifyDNSLength {
+ s += ":VerifyDNSLength"
+ }
+ return s
+}
+
+var (
+ // Punycode is a Profile that does raw punycode processing with a minimum
+ // of validation.
+ Punycode *Profile = punycode
+
+ // Lookup is the recommended profile for looking up domain names, according
+ // to Section 5 of RFC 5891. The exact configuration of this profile may
+ // change over time.
+ Lookup *Profile = lookup
+
+ // Display is the recommended profile for displaying domain names.
+ // The configuration of this profile may change over time.
+ Display *Profile = display
+
+ // Registration is the recommended profile for checking whether a given
+ // IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration
+
+ punycode = &Profile{}
+ lookup = &Profile{options{
+ transitional: true,
+ removeLeadingDots: true,
+ useSTD3Rules: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ display = &Profile{options{
+ useSTD3Rules: true,
+ removeLeadingDots: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ registration = &Profile{options{
+ useSTD3Rules: true,
+ verifyDNSLength: true,
+ checkHyphens: true,
+ checkJoiners: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateRegistration,
+ bidirule: bidirule.ValidString,
+ }}
+
+ // TODO: profiles
+ // Register: recommended for approving domain names: don't do any mappings
+ // but rather reject on invalid input. Bundle or block deviation characters.
+)
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+ return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+ return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see https://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+ var err error
+ if p.mapping != nil {
+ s, err = p.mapping(p, s)
+ }
+ // Remove leading empty labels.
+ if p.removeLeadingDots {
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+ }
+ }
+ // It seems like we should only create this error on ToASCII, but the
+ // UTS 46 conformance tests suggests we should always check this.
+ if err == nil && p.verifyDNSLength && s == "" {
+ err = &labelError{s, "A4"}
+ }
+ labels := labelIter{orig: s}
+ for ; !labels.done(); labels.next() {
+ label := labels.label()
+ if label == "" {
+ // Empty labels are not okay. The label iterator skips the last
+ // label if it is empty.
+ if err == nil && p.verifyDNSLength {
+ err = &labelError{s, "A4"}
+ }
+ continue
+ }
+ if strings.HasPrefix(label, acePrefix) {
+ u, err2 := decode(label[len(acePrefix):])
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ // Spec says keep the old label.
+ continue
+ }
+ labels.set(u)
+ if err == nil && p.fromPuny != nil {
+ err = p.fromPuny(p, u)
+ }
+ if err == nil {
+ // This should be called on NonTransitional, according to the
+ // spec, but that currently does not have any effect. Use the
+ // original profile to preserve options.
+ err = p.validateLabel(u)
+ }
+ } else if err == nil {
+ err = p.validateLabel(label)
+ }
+ }
+ if toASCII {
+ for labels.reset(); !labels.done(); labels.next() {
+ label := labels.label()
+ if !ascii(label) {
+ a, err2 := encode(acePrefix, label)
+ if err == nil {
+ err = err2
+ }
+ label = a
+ labels.set(a)
+ }
+ n := len(label)
+ if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+ err = &labelError{label, "A4"}
+ }
+ }
+ }
+ s = labels.result()
+ if toASCII && p.verifyDNSLength && err == nil {
+ // Compute the length of the domain name minus the root label and its dot.
+ n := len(s)
+ if n > 0 && s[n-1] == '.' {
+ n--
+ }
+ if len(s) < 1 || n > 253 {
+ err = &labelError{s, "A4"}
+ }
+ }
+ return s, err
+}
+
+func normalize(p *Profile, s string) (string, error) {
+ return norm.NFC.String(s), nil
+}
+
+func validateRegistration(p *Profile, s string) (string, error) {
+ if !norm.NFC.IsNormalString(s) {
+ return s, &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ // TODO: handle the NV8 defined in the Unicode idna data set to allow
+ // for strict conformance to IDNA2008.
+ case valid, deviation:
+ case disallowed, mapped, unknown, ignored:
+ r, _ := utf8.DecodeRuneInString(s[i:])
+ return s, runeError(r)
+ }
+ i += sz
+ }
+ return s, nil
+}
+
+func validateAndMap(p *Profile, s string) (string, error) {
+ var (
+ err error
+ b []byte
+ k int
+ )
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ start := i
+ i += sz
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ case valid:
+ continue
+ case disallowed:
+ if err == nil {
+ r, _ := utf8.DecodeRuneInString(s[start:])
+ err = runeError(r)
+ }
+ continue
+ case mapped, deviation:
+ b = append(b, s[k:start]...)
+ b = info(v).appendMapping(b, s[start:i])
+ case ignored:
+ b = append(b, s[k:start]...)
+ // drop the rune
+ case unknown:
+ b = append(b, s[k:start]...)
+ b = append(b, "\ufffd"...)
+ }
+ k = i
+ }
+ if k == 0 {
+ // No changes so far.
+ s = norm.NFC.String(s)
+ } else {
+ b = append(b, s[k:]...)
+ if norm.NFC.QuickSpan(b) != len(b) {
+ b = norm.NFC.Bytes(b)
+ }
+ // TODO: the punycode converters require strings as input.
+ s = string(b)
+ }
+ return s, err
+}
+
+// A labelIter allows iterating over domain name labels.
+type labelIter struct {
+ orig string
+ slice []string
+ curStart int
+ curEnd int
+ i int
+}
+
+func (l *labelIter) reset() {
+ l.curStart = 0
+ l.curEnd = 0
+ l.i = 0
+}
+
+func (l *labelIter) done() bool {
+ return l.curStart >= len(l.orig)
+}
+
+func (l *labelIter) result() string {
+ if l.slice != nil {
+ return strings.Join(l.slice, ".")
+ }
+ return l.orig
+}
+
+func (l *labelIter) label() string {
+ if l.slice != nil {
+ return l.slice[l.i]
+ }
+ p := strings.IndexByte(l.orig[l.curStart:], '.')
+ l.curEnd = l.curStart + p
+ if p == -1 {
+ l.curEnd = len(l.orig)
+ }
+ return l.orig[l.curStart:l.curEnd]
+}
+
+// next sets the value to the next label. It skips the last label if it is empty.
+func (l *labelIter) next() {
+ l.i++
+ if l.slice != nil {
+ if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
+ l.curStart = len(l.orig)
+ }
+ } else {
+ l.curStart = l.curEnd + 1
+ if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
+ l.curStart = len(l.orig)
+ }
+ }
+}
+
+func (l *labelIter) set(s string) {
+ if l.slice == nil {
+ l.slice = strings.Split(l.orig, ".")
+ }
+ l.slice[l.i] = s
+}
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+ switch cat {
+ case disallowedSTD3Mapped:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = mapped
+ }
+ case disallowedSTD3Valid:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = valid
+ }
+ case deviation:
+ if !p.transitional {
+ cat = valid
+ }
+ case validNV8, validXV8:
+ // TODO: handle V2008
+ cat = valid
+ }
+ return cat
+}
+
+func validateFromPunycode(p *Profile, s string) error {
+ if !norm.NFC.IsNormalString(s) {
+ return &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if c := p.simplify(info(v).category()); c != valid && c != deviation {
+ return &labelError{s, "V6"}
+ }
+ i += sz
+ }
+ return nil
+}
+
+const (
+ zwnj = "\u200c"
+ zwj = "\u200d"
+)
+
+type joinState int8
+
+const (
+ stateStart joinState = iota
+ stateVirama
+ stateBefore
+ stateBeforeVirama
+ stateAfter
+ stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+ stateStart: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateVirama,
+ },
+ stateVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ },
+ stateBefore: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ joinZWNJ: stateAfter,
+ joinZWJ: stateFAIL,
+ joinVirama: stateBeforeVirama,
+ },
+ stateBeforeVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ },
+ stateAfter: {
+ joiningL: stateFAIL,
+ joiningD: stateBefore,
+ joiningT: stateAfter,
+ joiningR: stateStart,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateAfter, // no-op as we can't accept joiners here
+ },
+ stateFAIL: {
+ 0: stateFAIL,
+ joiningL: stateFAIL,
+ joiningD: stateFAIL,
+ joiningT: stateFAIL,
+ joiningR: stateFAIL,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateFAIL,
+ },
+}
+
+// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
+// already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) error {
+ if s == "" {
+ if p.verifyDNSLength {
+ return &labelError{s, "A4"}
+ }
+ return nil
+ }
+ if p.bidirule != nil && !p.bidirule(s) {
+ return &labelError{s, "B"}
+ }
+ if p.checkHyphens {
+ if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+ return &labelError{s, "V2"}
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return &labelError{s, "V3"}
+ }
+ }
+ if !p.checkJoiners {
+ return nil
+ }
+ trie := p.trie // p.checkJoiners is only set if trie is set.
+ // TODO: merge the use of this in the trie.
+ v, sz := trie.lookupString(s)
+ x := info(v)
+ if x.isModifier() {
+ return &labelError{s, "V5"}
+ }
+ // Quickly return in the absence of zero-width (non) joiners.
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
+ return nil
+ }
+ st := stateStart
+ for i := 0; ; {
+ jt := x.joinType()
+ if s[i:i+sz] == zwj {
+ jt = joinZWJ
+ } else if s[i:i+sz] == zwnj {
+ jt = joinZWNJ
+ }
+ st = joinStates[st][jt]
+ if x.isViramaModifier() {
+ st = joinStates[st][joinVirama]
+ }
+ if i += sz; i == len(s) {
+ break
+ }
+ v, sz = trie.lookupString(s[i:])
+ x = info(v)
+ }
+ if st == stateFAIL || st == stateAfter {
+ return &labelError{s, "C"}
+ }
+ return nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go
new file mode 100644
index 0000000..40e74bb
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/pre_go118.go
@@ -0,0 +1,11 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+
+package idna
+
+const transitionalLookup = true
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go
new file mode 100644
index 0000000..e8e3ac1
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/punycode.go
@@ -0,0 +1,217 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// This file implements the Punycode algorithm from RFC 3492.
+
+import (
+ "math"
+ "strings"
+ "unicode/utf8"
+)
+
+// These parameter values are specified in section 5.
+//
+// All computation is done with int32s, so that overflow behavior is identical
+// regardless of whether int is 32-bit or 64-bit.
+const (
+ base int32 = 36
+ damp int32 = 700
+ initialBias int32 = 72
+ initialN int32 = 128
+ skew int32 = 38
+ tmax int32 = 26
+ tmin int32 = 1
+)
+
+func punyError(s string) error { return &labelError{s, "A3"} }
+
+// decode decodes a string as specified in section 6.2.
+func decode(encoded string) (string, error) {
+ if encoded == "" {
+ return "", nil
+ }
+ pos := 1 + strings.LastIndex(encoded, "-")
+ if pos == 1 {
+ return "", punyError(encoded)
+ }
+ if pos == len(encoded) {
+ return encoded[:len(encoded)-1], nil
+ }
+ output := make([]rune, 0, len(encoded))
+ if pos != 0 {
+ for _, r := range encoded[:pos-1] {
+ output = append(output, r)
+ }
+ }
+ i, n, bias := int32(0), initialN, initialBias
+ overflow := false
+ for pos < len(encoded) {
+ oldI, w := i, int32(1)
+ for k := base; ; k += base {
+ if pos == len(encoded) {
+ return "", punyError(encoded)
+ }
+ digit, ok := decodeDigit(encoded[pos])
+ if !ok {
+ return "", punyError(encoded)
+ }
+ pos++
+ i, overflow = madd(i, digit, w)
+ if overflow {
+ return "", punyError(encoded)
+ }
+ t := k - bias
+ if k <= bias {
+ t = tmin
+ } else if k >= bias+tmax {
+ t = tmax
+ }
+ if digit < t {
+ break
+ }
+ w, overflow = madd(0, w, base-t)
+ if overflow {
+ return "", punyError(encoded)
+ }
+ }
+ if len(output) >= 1024 {
+ return "", punyError(encoded)
+ }
+ x := int32(len(output) + 1)
+ bias = adapt(i-oldI, x, oldI == 0)
+ n += i / x
+ i %= x
+ if n < 0 || n > utf8.MaxRune {
+ return "", punyError(encoded)
+ }
+ output = append(output, 0)
+ copy(output[i+1:], output[i:])
+ output[i] = n
+ i++
+ }
+ return string(output), nil
+}
+
+// encode encodes a string as specified in section 6.3 and prepends prefix to
+// the result.
+//
+// The "while h < length(input)" line in the specification becomes "for
+// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
+func encode(prefix, s string) (string, error) {
+ output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
+ copy(output, prefix)
+ delta, n, bias := int32(0), initialN, initialBias
+ b, remaining := int32(0), int32(0)
+ for _, r := range s {
+ if r < 0x80 {
+ b++
+ output = append(output, byte(r))
+ } else {
+ remaining++
+ }
+ }
+ h := b
+ if b > 0 {
+ output = append(output, '-')
+ }
+ overflow := false
+ for remaining != 0 {
+ m := int32(0x7fffffff)
+ for _, r := range s {
+ if m > r && r >= n {
+ m = r
+ }
+ }
+ delta, overflow = madd(delta, m-n, h+1)
+ if overflow {
+ return "", punyError(s)
+ }
+ n = m
+ for _, r := range s {
+ if r < n {
+ delta++
+ if delta < 0 {
+ return "", punyError(s)
+ }
+ continue
+ }
+ if r > n {
+ continue
+ }
+ q := delta
+ for k := base; ; k += base {
+ t := k - bias
+ if k <= bias {
+ t = tmin
+ } else if k >= bias+tmax {
+ t = tmax
+ }
+ if q < t {
+ break
+ }
+ output = append(output, encodeDigit(t+(q-t)%(base-t)))
+ q = (q - t) / (base - t)
+ }
+ output = append(output, encodeDigit(q))
+ bias = adapt(delta, h+1, h == b)
+ delta = 0
+ h++
+ remaining--
+ }
+ delta++
+ n++
+ }
+ return string(output), nil
+}
+
+// madd computes a + (b * c), detecting overflow.
+func madd(a, b, c int32) (next int32, overflow bool) {
+ p := int64(b) * int64(c)
+ if p > math.MaxInt32-int64(a) {
+ return 0, true
+ }
+ return a + int32(p), false
+}
+
+func decodeDigit(x byte) (digit int32, ok bool) {
+ switch {
+ case '0' <= x && x <= '9':
+ return int32(x - ('0' - 26)), true
+ case 'A' <= x && x <= 'Z':
+ return int32(x - 'A'), true
+ case 'a' <= x && x <= 'z':
+ return int32(x - 'a'), true
+ }
+ return 0, false
+}
+
+func encodeDigit(digit int32) byte {
+ switch {
+ case 0 <= digit && digit < 26:
+ return byte(digit + 'a')
+ case 26 <= digit && digit < 36:
+ return byte(digit + ('0' - 26))
+ }
+ panic("idna: internal error in punycode encoding")
+}
+
+// adapt is the bias adaptation function specified in section 6.1.
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+ if firstTime {
+ delta /= damp
+ } else {
+ delta /= 2
+ }
+ delta += delta / numPoints
+ k := int32(0)
+ for delta > ((base-tmin)*tmax)/2 {
+ delta /= base - tmin
+ k += base
+ }
+ return k + (base-tmin+1)*delta/(delta+skew)
+}
diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go
new file mode 100644
index 0000000..c6c2bf1
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables10.0.0.go
@@ -0,0 +1,4559 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build go1.10 && !go1.13
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "10.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,
+ 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,
+ 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,
+ 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,
+ 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,
+ 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,
+ 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,
+ 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,
+ 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d,
+ 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,
+ 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,
+ 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,
+ 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,
+ 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,
+ 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,
+ 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,
+ 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,
+ 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,
+ 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,
+ 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,
+ 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,
+ 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,
+ 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,
+ 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,
+ 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,
+ 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,
+ 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,
+ 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,
+ 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,
+ 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,
+ 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,
+ 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,
+ 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,
+ 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,
+ 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,
+ 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,
+ 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,
+ 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,
+ 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,
+ 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,
+ 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,
+ 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,
+ 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,
+ 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,
+ 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,
+ 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,
+ 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,
+ 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,
+ 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,
+ 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,
+ 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,
+ 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,
+ 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,
+ 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,
+ 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,
+ 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,
+ 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,
+ 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,
+ 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,
+ 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,
+ 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,
+ 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,
+ 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081,
+ 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,
+ 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,
+ 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,
+ 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,
+ 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,
+ 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,
+ 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,
+ 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,
+ 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,
+ 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,
+ 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,
+ 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,
+ 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,
+ 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,
+ 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,
+ 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,
+ 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,
+ 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,
+ 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,
+ 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,
+ 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,
+ 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,
+ 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,
+ 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,
+ 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,
+ 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,
+ 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,
+ 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,
+ 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,
+ 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,
+ 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,
+ 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,
+ 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,
+ 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,
+ 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,
+ 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,
+ 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,
+ 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,
+ 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,
+ 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,
+ 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,
+ 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,
+ 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679,
+ 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,
+ 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,
+ 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,
+ 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,
+ 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,
+ 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,
+ 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,
+ 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,
+ 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,
+ 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,
+ 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,
+ 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,
+ 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,
+ 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,
+ 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,
+ 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,
+ 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,
+ 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,
+ 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,
+ 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,
+ 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,
+ 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,
+ 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,
+ 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,
+ 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,
+ 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,
+ 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,
+ 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,
+ 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,
+ 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,
+ 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,
+ 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,
+ 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,
+ 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,
+ 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,
+ 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,
+ 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,
+ 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,
+ 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,
+ 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,
+ 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,
+ 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,
+ 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,
+ 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,
+ 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,
+ 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,
+ 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,
+ 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,
+ 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,
+ 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,
+ 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,
+ 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e,
+ 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137,
+ 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160,
+ 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e,
+ 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172,
+ 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179,
+ 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 264 entries, 528 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778}
+
+// idnaSparseValues: 1915 entries, 7660 bytes
+var idnaSparseValues = [1915]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x6, offset 0x34
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x63
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0xc, offset 0x6b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x77
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x93
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa3
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbd
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xc9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xda
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xeb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x109
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x110
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x138
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x142
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x144
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x149
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x14f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x151
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x168
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x170
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x176
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x181
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x186
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x189
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x193
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x198
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a4
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1ae
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b4
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1cf
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x34, offset 0x1d2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1da
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1dd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1ea
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fd
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x205
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x215
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x221
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x223
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22d
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x239
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x245
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x251
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x259
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x45, offset 0x268
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x46, offset 0x279
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x27d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x288
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x28c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x295
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x29d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2a8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ab
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0018, lo: 0xbd, hi: 0xbf},
+ // Block 0x53, offset 0x2c3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xab},
+ {value: 0x0018, lo: 0xac, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2ca
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d0
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2d8
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2df
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x58, offset 0x2ea
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2f4
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5a, offset 0x2f8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0x5b, offset 0x2fb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5c, offset 0x301
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5d, offset 0x305
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x5e, offset 0x307
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x5f, offset 0x30a
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x30c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x61, offset 0x30f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x62, offset 0x319
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x63, offset 0x31c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x66, offset 0x334
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0x67, offset 0x337
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x68, offset 0x33b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x69, offset 0x340
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6a, offset 0x345
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x34b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x351
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x360
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6e, offset 0x366
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6f, offset 0x36a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x70, offset 0x379
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x71, offset 0x37e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x72, offset 0x386
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x390
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x74, offset 0x39b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3a3
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3b4
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3bd
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x78, offset 0x3cd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3da
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3e4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3e9
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7c, offset 0x3f6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3fa
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7e, offset 0x3ff
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x401
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x80, offset 0x405
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x407
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x82, offset 0x40b
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x83, offset 0x414
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x84, offset 0x41a
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x85, offset 0x41e
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x86, offset 0x42e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x87, offset 0x438
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x88, offset 0x43d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x89, offset 0x440
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8a, offset 0x446
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8b, offset 0x44d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8c, offset 0x452
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8d, offset 0x456
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8e, offset 0x45c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8f, offset 0x461
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x90, offset 0x46a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x91, offset 0x46f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x92, offset 0x475
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x93, offset 0x47c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x94, offset 0x483
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x95, offset 0x48a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x48e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x97, offset 0x493
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x496
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x99, offset 0x49b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9a, offset 0x4a7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9b, offset 0x4ad
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x4b2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9d, offset 0x4b9
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4c1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9f, offset 0x4c6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa0, offset 0x4ca
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa1, offset 0x4da
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa2, offset 0x4e1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa3, offset 0x4e5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa4, offset 0x4e9
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa5, offset 0x4f0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa6, offset 0x4f2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa7, offset 0x4f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa8, offset 0x4f8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x4fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xaa, offset 0x500
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xab, offset 0x506
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xac, offset 0x50f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0340, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xad, offset 0x51b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xae, offset 0x522
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xaf, offset 0x52b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb0, offset 0x533
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb1, offset 0x53a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb2, offset 0x548
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb3, offset 0x555
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb4, offset 0x562
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb5, offset 0x56b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb6, offset 0x56f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xb7, offset 0x57d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xb8, offset 0x585
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xb9, offset 0x590
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xba, offset 0x599
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbb, offset 0x59f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbc, offset 0x5a7
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xbd, offset 0x5b0
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xbe, offset 0x5ba
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xbf, offset 0x5bd
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc0, offset 0x5c9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc1, offset 0x5cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc2, offset 0x5d1
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc3, offset 0x5de
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xc4, offset 0x5e7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xc5, offset 0x5f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc6, offset 0x5f6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc7, offset 0x600
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xc8, offset 0x609
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xc9, offset 0x615
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xca, offset 0x622
+ {value: 0x0000, lo: 0x07},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xcb, offset 0x62a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xcc, offset 0x62d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xcd, offset 0x632
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xce, offset 0x635
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xcf, offset 0x638
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd0, offset 0x63b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xd1, offset 0x642
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xd2, offset 0x649
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd3, offset 0x64d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xd4, offset 0x658
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xd5, offset 0x65b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd6, offset 0x661
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xd7, offset 0x666
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xd8, offset 0x66a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xd9, offset 0x66d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xda, offset 0x670
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xdb, offset 0x673
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xdc, offset 0x676
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xdd, offset 0x679
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xde, offset 0x67e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xdf, offset 0x688
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe0, offset 0x68b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xe1, offset 0x68f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xe2, offset 0x69e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xe3, offset 0x6aa
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xe4, offset 0x6ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xe5, offset 0x6b3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe6, offset 0x6b8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xe7, offset 0x6bc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xe8, offset 0x6c1
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe9, offset 0x6ca
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xea, offset 0x6d5
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xeb, offset 0x6db
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xec, offset 0x6e3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xed, offset 0x6e7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xee, offset 0x6eb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xef, offset 0x6f1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xf0, offset 0x6f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xf1, offset 0x6fc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xf2, offset 0x6ff
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0xf3, offset 0x70f
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xf4, offset 0x716
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf5, offset 0x719
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0xbf},
+ // Block 0xf6, offset 0x71c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf7, offset 0x720
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0xf8, offset 0x726
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0xf9, offset 0x72b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xfa, offset 0x730
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0xfb, offset 0x735
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xbf},
+ // Block 0xfc, offset 0x738
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0xfd, offset 0x73d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xfe, offset 0x740
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xff, offset 0x743
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x100, offset 0x747
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x101, offset 0x74b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x102, offset 0x74e
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0x103, offset 0x75e
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x104, offset 0x76f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x105, offset 0x774
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x106, offset 0x776
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x107, offset 0x778
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42114 bytes (41KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go
new file mode 100644
index 0000000..7678939
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -0,0 +1,4653 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build go1.13 && !go1.14
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "11.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,
+ 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,
+ 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,
+ 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,
+ 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,
+ 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,
+ 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,
+ 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,
+ 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d,
+ 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,
+ 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,
+ 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,
+ 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,
+ 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,
+ 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,
+ 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,
+ 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,
+ 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,
+ 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,
+ 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,
+ 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,
+ 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,
+ 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,
+ 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,
+ 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,
+ 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,
+ 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,
+ 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,
+ 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,
+ 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,
+ 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,
+ 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,
+ 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,
+ 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,
+ 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,
+ 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,
+ 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,
+ 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,
+ 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,
+ 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,
+ 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,
+ 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,
+ 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,
+ 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,
+ 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,
+ 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,
+ 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,
+ 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,
+ 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,
+ 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,
+ 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,
+ 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,
+ 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,
+ 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,
+ 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,
+ 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,
+ 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,
+ 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,
+ 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,
+ 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,
+ 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,
+ 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,
+ 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081,
+ 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,
+ 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,
+ 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,
+ 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,
+ 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,
+ 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,
+ 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,
+ 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,
+ 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,
+ 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,
+ 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,
+ 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,
+ 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,
+ 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,
+ 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,
+ 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,
+ 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,
+ 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,
+ 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,
+ 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,
+ 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,
+ 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,
+ 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,
+ 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,
+ 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,
+ 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,
+ 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,
+ 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,
+ 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,
+ 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,
+ 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,
+ 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,
+ 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,
+ 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,
+ 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,
+ 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,
+ 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,
+ 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,
+ 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,
+ 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,
+ 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,
+ 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,
+ 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679,
+ 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,
+ 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,
+ 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,
+ 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,
+ 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,
+ 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,
+ 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,
+ 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,
+ 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,
+ 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,
+ 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,
+ 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,
+ 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,
+ 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,
+ 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,
+ 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,
+ 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,
+ 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,
+ 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,
+ 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,
+ 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,
+ 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,
+ 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,
+ 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,
+ 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,
+ 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,
+ 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,
+ 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,
+ 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,
+ 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,
+ 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,
+ 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,
+ 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,
+ 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,
+ 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,
+ 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,
+ 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,
+ 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,
+ 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,
+ 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,
+ 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,
+ 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,
+ 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,
+ 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,
+ 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,
+ 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,
+ 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,
+ 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,
+ 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,
+ 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,
+ 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,
+ 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168,
+ 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179,
+ 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d,
+ 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184,
+ 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 276 entries, 552 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca}
+
+// idnaSparseValues: 1997 entries, 7988 bytes
+var idnaSparseValues = [1997]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x45, offset 0x269
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x46, offset 0x27a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x27e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x289
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x28d
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x296
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x29e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2a9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ac
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2ba
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2be
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x54, offset 0x2c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2cd
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2dc
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x58, offset 0x2e7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5a, offset 0x2f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0xbf},
+ // Block 0x5b, offset 0x2f8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5c, offset 0x2fe
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x5e, offset 0x304
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x309
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x61, offset 0x30c
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x62, offset 0x316
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x63, offset 0x319
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x64, offset 0x328
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x65, offset 0x32c
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x66, offset 0x331
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x334
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x68, offset 0x338
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x69, offset 0x33d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6a, offset 0x342
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x348
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x34e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6f, offset 0x367
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x70, offset 0x376
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x71, offset 0x37b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x72, offset 0x383
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x38d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3a0
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3b1
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3ba
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x78, offset 0x3ca
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3d7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3e1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3e6
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7c, offset 0x3f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7e, offset 0x3fc
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x3fe
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x80, offset 0x402
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x404
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x82, offset 0x408
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x83, offset 0x411
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x84, offset 0x417
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x85, offset 0x41b
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x86, offset 0x42b
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x87, offset 0x435
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x88, offset 0x43a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x89, offset 0x43d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8a, offset 0x443
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8b, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8c, offset 0x44f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8d, offset 0x453
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8e, offset 0x459
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8f, offset 0x45e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x90, offset 0x467
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x91, offset 0x46c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x92, offset 0x472
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x93, offset 0x479
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x94, offset 0x480
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x95, offset 0x487
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x48b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x97, offset 0x490
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x493
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x99, offset 0x498
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9a, offset 0x4a4
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9b, offset 0x4aa
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x4af
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9d, offset 0x4b6
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4be
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9f, offset 0x4c3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa0, offset 0x4c7
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa1, offset 0x4d7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa2, offset 0x4de
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa3, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa4, offset 0x4e6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa5, offset 0x4ed
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa6, offset 0x4ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa7, offset 0x4f2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa8, offset 0x4f5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x4f9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xaa, offset 0x502
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xab, offset 0x506
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xac, offset 0x50e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xad, offset 0x516
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x51c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x525
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x531
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x538
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x541
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x54b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x552
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x560
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x56d
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x57a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x583
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x587
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xba, offset 0x596
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x59e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5a9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5b2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5b8
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5c9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc1, offset 0x5d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5d6
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5e2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5eb
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f3
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc7, offset 0x5fe
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xc8, offset 0x607
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xc9, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xca, offset 0x616
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcb, offset 0x620
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xcc, offset 0x629
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcd, offset 0x635
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xce, offset 0x642
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd0, offset 0x65d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd1, offset 0x664
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd2, offset 0x667
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd3, offset 0x66c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd4, offset 0x66f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xd5, offset 0x672
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd6, offset 0x675
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xd7, offset 0x67c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xd8, offset 0x683
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd9, offset 0x687
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xda, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xdb, offset 0x695
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdc, offset 0x698
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xdd, offset 0x69b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x6a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xe0, offset 0x6aa
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x6ad
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe2, offset 0x6b0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe3, offset 0x6b3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x6b6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe5, offset 0x6b9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe6, offset 0x6be
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe7, offset 0x6c8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe8, offset 0x6cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xe9, offset 0x6cf
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xea, offset 0x6de
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xeb, offset 0x6ea
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xed, offset 0x6f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xee, offset 0x6f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xef, offset 0x6fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf0, offset 0x700
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf1, offset 0x705
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf2, offset 0x70e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf3, offset 0x719
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf4, offset 0x71f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xf5, offset 0x727
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xf6, offset 0x72a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xf7, offset 0x72d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xf8, offset 0x731
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xf9, offset 0x735
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xfa, offset 0x73b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xfb, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xfc, offset 0x746
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xfd, offset 0x749
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0xfe, offset 0x759
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xff, offset 0x760
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x100, offset 0x763
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0xbf},
+ // Block 0x101, offset 0x766
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x102, offset 0x76a
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x103, offset 0x770
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x104, offset 0x775
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x105, offset 0x77a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x106, offset 0x782
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x107, offset 0x787
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x108, offset 0x78b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x109, offset 0x78f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x10a, offset 0x792
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x10b, offset 0x795
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x10c, offset 0x799
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x10d, offset 0x79d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x10e, offset 0x7a0
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0x10f, offset 0x7b0
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x110, offset 0x7c1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x111, offset 0x7c6
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x112, offset 0x7c8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x113, offset 0x7ca
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42466 bytes (41KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go
new file mode 100644
index 0000000..0600cd2
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables12.0.0.go
@@ -0,0 +1,4733 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build go1.14 && !go1.16
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "12.0.0"
+
+var mappings string = "" + // Size: 8178 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多" +
+ "\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販" +
+ "\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打" +
+ "\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕" +
+ "\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你" +
+ "\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內" +
+ "\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉" +
+ "\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟" +
+ "\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙" +
+ "\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型" +
+ "\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮" +
+ "\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍" +
+ "\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰" +
+ "\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹" +
+ "\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞" +
+ "\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢" +
+ "\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙" +
+ "\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓" +
+ "\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛" +
+ "\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派" +
+ "\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆" +
+ "\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀" +
+ "\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾" +
+ "\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌" +
+ "\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒" +
+ "\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺" +
+ "\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋" +
+ "\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著" +
+ "\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜" +
+ "\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠" +
+ "\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁" +
+ "\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘" +
+ "\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲" +
+ "\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭" +
+ "\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4862 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" +
+ "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" +
+ "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" +
+ "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" +
+ "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" +
+ "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" +
+ "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" +
+ "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" +
+ "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" +
+ "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" +
+ "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" +
+ "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" +
+ "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" +
+ "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" +
+ "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" +
+ "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" +
+ "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" +
+ "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" +
+ "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" +
+ "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" +
+ "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" +
+ "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" +
+ "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" +
+ "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" +
+ "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" +
+ "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" +
+ "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" +
+ "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" +
+ "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" +
+ "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" +
+ "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" +
+ "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" +
+ "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" +
+ "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" +
+ "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" +
+ "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" +
+ "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" +
+ "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" +
+ "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" +
+ "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" +
+ "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" +
+ "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" +
+ "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" +
+ "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" +
+ "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" +
+ "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" +
+ "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" +
+ "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" +
+ "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." +
+ "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" +
+ "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" +
+ "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" +
+ "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" +
+ "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" +
+ "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" +
+ "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" +
+ "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03" +
+ "\x09\x0c\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06" +
+ "!3\x03\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05" +
+ "\x03\x07<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" +
+ "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" +
+ "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" +
+ "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" +
+ "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" +
+ "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" +
+ ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" +
+ "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" +
+ "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" +
+ "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" +
+ "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" +
+ "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" +
+ "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" +
+ "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" +
+ "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" +
+ "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" +
+ "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" +
+ "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" +
+ ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29708 bytes (29.01 KiB). Checksum: c3ecc76d8fffa6e6.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866,
+ 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e,
+ 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe,
+ 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e,
+ 0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e,
+ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde,
+ 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e,
+ 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5,
+ 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875,
+ 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935,
+ 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5,
+ 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15,
+ 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75,
+ 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95,
+ 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75,
+ 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5,
+ 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55,
+ 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15,
+ 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95,
+ 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5,
+ 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5,
+ 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5,
+ 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5,
+ 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275,
+ 0xf1e: 0x3295, 0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5,
+ 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5,
+ 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475,
+ 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535,
+ 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5,
+ 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795,
+ 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855,
+ 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915,
+ 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5,
+ 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95,
+ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55,
+ 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5,
+ 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95,
+ 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d,
+ 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d,
+ 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05,
+ 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95,
+ 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd,
+ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55,
+ 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5,
+ 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015,
+ 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x4045, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x4065, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x40a5, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40c5, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x4105,
+ 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x41a5, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6419, 0x10a2: 0x4205, 0x10a3: 0x4225,
+ 0x10a4: 0x4245, 0x10a5: 0x6431, 0x10a6: 0x4265, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x4305, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x4325, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x4345, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x43a5, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x4425,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008,
+ 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad,
+ 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d,
+ 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008,
+ 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d,
+ 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d,
+ 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d,
+ 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d,
+ 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed,
+ 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d,
+ 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d,
+ 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d8d,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7dad,
+ 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d,
+ 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e,
+ 0x158c: 0x7fae, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7fcd,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7ecd,
+ 0x159e: 0x7f2d, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x800e, 0x15b1: 0xb009, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040,
+ 0x15b6: 0x806e, 0x15b7: 0xb031, 0x15b8: 0x808e, 0x15b9: 0xb059, 0x15ba: 0x80ae, 0x15bb: 0xb081,
+ 0x15bc: 0x80ce, 0x15bd: 0xb0a9, 0x15be: 0x80ee, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d,
+ 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d,
+ 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd,
+ 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d,
+ 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d,
+ 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d,
+ 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd,
+ 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d,
+ 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d,
+ 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d,
+ 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d,
+ 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed,
+ 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d,
+ 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed,
+ 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d,
+ 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d,
+ 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d,
+ 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x8a0e,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d,
+ 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b3d, 0x1c5f: 0x8b3d, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0xc1c1, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1f1, 0x1dc1: 0xc229, 0x1dc2: 0xc261, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc281, 0x1dd1: 0xc2a1,
+ 0x1dd2: 0xc2c1, 0x1dd3: 0xc2e1, 0x1dd4: 0xc301, 0x1dd5: 0xc321, 0x1dd6: 0xc341, 0x1dd7: 0xc361,
+ 0x1dd8: 0xc381, 0x1dd9: 0xc3a1, 0x1dda: 0xc3c1, 0x1ddb: 0xc3e1, 0x1ddc: 0xc401, 0x1ddd: 0xc421,
+ 0x1dde: 0xc441, 0x1ddf: 0xc461, 0x1de0: 0xc481, 0x1de1: 0xc4a1, 0x1de2: 0xc4c1, 0x1de3: 0xc4e1,
+ 0x1de4: 0xc501, 0x1de5: 0xc521, 0x1de6: 0xc541, 0x1de7: 0xc561, 0x1de8: 0xc581, 0x1de9: 0xc5a1,
+ 0x1dea: 0xc5c1, 0x1deb: 0xc5e1, 0x1dec: 0xc601, 0x1ded: 0xc621, 0x1dee: 0xc641, 0x1def: 0xc661,
+ 0x1df0: 0xc681, 0x1df1: 0xc6a1, 0x1df2: 0xc6c1, 0x1df3: 0xc6e1, 0x1df4: 0xc701, 0x1df5: 0xc721,
+ 0x1df6: 0xc741, 0x1df7: 0xc761, 0x1df8: 0xc781, 0x1df9: 0xc7a1, 0x1dfa: 0xc7c1, 0x1dfb: 0xc7e1,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcb11, 0x1e01: 0xcb31, 0x1e02: 0xcb51, 0x1e03: 0x8b55, 0x1e04: 0xcb71, 0x1e05: 0xcb91,
+ 0x1e06: 0xcbb1, 0x1e07: 0xcbd1, 0x1e08: 0xcbf1, 0x1e09: 0xcc11, 0x1e0a: 0xcc31, 0x1e0b: 0xcc51,
+ 0x1e0c: 0xcc71, 0x1e0d: 0x8b75, 0x1e0e: 0xcc91, 0x1e0f: 0xccb1, 0x1e10: 0xccd1, 0x1e11: 0xccf1,
+ 0x1e12: 0x8b95, 0x1e13: 0xcd11, 0x1e14: 0xcd31, 0x1e15: 0xc441, 0x1e16: 0x8bb5, 0x1e17: 0xcd51,
+ 0x1e18: 0xcd71, 0x1e19: 0xcd91, 0x1e1a: 0xcdb1, 0x1e1b: 0xcdd1, 0x1e1c: 0x8bd5, 0x1e1d: 0xcdf1,
+ 0x1e1e: 0xce11, 0x1e1f: 0xce31, 0x1e20: 0xce51, 0x1e21: 0xce71, 0x1e22: 0xc7a1, 0x1e23: 0xce91,
+ 0x1e24: 0xceb1, 0x1e25: 0xced1, 0x1e26: 0xcef1, 0x1e27: 0xcf11, 0x1e28: 0xcf31, 0x1e29: 0xcf51,
+ 0x1e2a: 0xcf71, 0x1e2b: 0xcf91, 0x1e2c: 0xcfb1, 0x1e2d: 0xcfd1, 0x1e2e: 0xcff1, 0x1e2f: 0xd011,
+ 0x1e30: 0xd031, 0x1e31: 0xd051, 0x1e32: 0xd051, 0x1e33: 0xd051, 0x1e34: 0x8bf5, 0x1e35: 0xd071,
+ 0x1e36: 0xd091, 0x1e37: 0xd0b1, 0x1e38: 0x8c15, 0x1e39: 0xd0d1, 0x1e3a: 0xd0f1, 0x1e3b: 0xd111,
+ 0x1e3c: 0xd131, 0x1e3d: 0xd151, 0x1e3e: 0xd171, 0x1e3f: 0xd191,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd1b1, 0x1e41: 0xd1d1, 0x1e42: 0xd1f1, 0x1e43: 0xd211, 0x1e44: 0xd231, 0x1e45: 0xd251,
+ 0x1e46: 0xd251, 0x1e47: 0xd271, 0x1e48: 0xd291, 0x1e49: 0xd2b1, 0x1e4a: 0xd2d1, 0x1e4b: 0xd2f1,
+ 0x1e4c: 0xd311, 0x1e4d: 0xd331, 0x1e4e: 0xd351, 0x1e4f: 0xd371, 0x1e50: 0xd391, 0x1e51: 0xd3b1,
+ 0x1e52: 0xd3d1, 0x1e53: 0xd3f1, 0x1e54: 0xd411, 0x1e55: 0xd431, 0x1e56: 0xd451, 0x1e57: 0xd471,
+ 0x1e58: 0xd491, 0x1e59: 0x8c35, 0x1e5a: 0xd4b1, 0x1e5b: 0xd4d1, 0x1e5c: 0xd4f1, 0x1e5d: 0xc321,
+ 0x1e5e: 0xd511, 0x1e5f: 0xd531, 0x1e60: 0x8c55, 0x1e61: 0x8c75, 0x1e62: 0xd551, 0x1e63: 0xd571,
+ 0x1e64: 0xd591, 0x1e65: 0xd5b1, 0x1e66: 0xd5d1, 0x1e67: 0xd5f1, 0x1e68: 0x2040, 0x1e69: 0xd611,
+ 0x1e6a: 0xd631, 0x1e6b: 0xd631, 0x1e6c: 0x8c95, 0x1e6d: 0xd651, 0x1e6e: 0xd671, 0x1e6f: 0xd691,
+ 0x1e70: 0xd6b1, 0x1e71: 0x8cb5, 0x1e72: 0xd6d1, 0x1e73: 0xd6f1, 0x1e74: 0x2040, 0x1e75: 0xd711,
+ 0x1e76: 0xd731, 0x1e77: 0xd751, 0x1e78: 0xd771, 0x1e79: 0xd791, 0x1e7a: 0xd7b1, 0x1e7b: 0x8cd5,
+ 0x1e7c: 0xd7d1, 0x1e7d: 0x8cf5, 0x1e7e: 0xd7f1, 0x1e7f: 0xd811,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd831, 0x1e81: 0xd851, 0x1e82: 0xd871, 0x1e83: 0xd891, 0x1e84: 0xd8b1, 0x1e85: 0xd8d1,
+ 0x1e86: 0xd8f1, 0x1e87: 0xd911, 0x1e88: 0xd931, 0x1e89: 0x8d15, 0x1e8a: 0xd951, 0x1e8b: 0xd971,
+ 0x1e8c: 0xd991, 0x1e8d: 0xd9b1, 0x1e8e: 0xd9d1, 0x1e8f: 0x8d35, 0x1e90: 0xd9f1, 0x1e91: 0x8d55,
+ 0x1e92: 0x8d75, 0x1e93: 0xda11, 0x1e94: 0xda31, 0x1e95: 0xda31, 0x1e96: 0xda51, 0x1e97: 0x8d95,
+ 0x1e98: 0x8db5, 0x1e99: 0xda71, 0x1e9a: 0xda91, 0x1e9b: 0xdab1, 0x1e9c: 0xdad1, 0x1e9d: 0xdaf1,
+ 0x1e9e: 0xdb11, 0x1e9f: 0xdb31, 0x1ea0: 0xdb51, 0x1ea1: 0xdb71, 0x1ea2: 0xdb91, 0x1ea3: 0xdbb1,
+ 0x1ea4: 0x8dd5, 0x1ea5: 0xdbd1, 0x1ea6: 0xdbf1, 0x1ea7: 0xdc11, 0x1ea8: 0xdc31, 0x1ea9: 0xdc11,
+ 0x1eaa: 0xdc51, 0x1eab: 0xdc71, 0x1eac: 0xdc91, 0x1ead: 0xdcb1, 0x1eae: 0xdcd1, 0x1eaf: 0xdcf1,
+ 0x1eb0: 0xdd11, 0x1eb1: 0xdd31, 0x1eb2: 0xdd51, 0x1eb3: 0xdd71, 0x1eb4: 0xdd91, 0x1eb5: 0xddb1,
+ 0x1eb6: 0xddd1, 0x1eb7: 0xddf1, 0x1eb8: 0x8df5, 0x1eb9: 0xde11, 0x1eba: 0xde31, 0x1ebb: 0xde51,
+ 0x1ebc: 0xde71, 0x1ebd: 0xde91, 0x1ebe: 0x8e15, 0x1ebf: 0xdeb1,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe5b1, 0x1ec1: 0xe5d1, 0x1ec2: 0xe5f1, 0x1ec3: 0xe611, 0x1ec4: 0xe631, 0x1ec5: 0xe651,
+ 0x1ec6: 0x8f35, 0x1ec7: 0xe671, 0x1ec8: 0xe691, 0x1ec9: 0xe6b1, 0x1eca: 0xe6d1, 0x1ecb: 0xe6f1,
+ 0x1ecc: 0xe711, 0x1ecd: 0x8f55, 0x1ece: 0xe731, 0x1ecf: 0xe751, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95,
+ 0x1ed2: 0xe771, 0x1ed3: 0xe791, 0x1ed4: 0xe7b1, 0x1ed5: 0xe7d1, 0x1ed6: 0xe7f1, 0x1ed7: 0xe811,
+ 0x1ed8: 0xe831, 0x1ed9: 0xe851, 0x1eda: 0xe871, 0x1edb: 0x8fb5, 0x1edc: 0xe891, 0x1edd: 0x8fd5,
+ 0x1ede: 0xe8b1, 0x1edf: 0x2040, 0x1ee0: 0xe8d1, 0x1ee1: 0xe8f1, 0x1ee2: 0xe911, 0x1ee3: 0x8ff5,
+ 0x1ee4: 0xe931, 0x1ee5: 0xe951, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0xe971, 0x1ee9: 0xe991,
+ 0x1eea: 0xe9b1, 0x1eeb: 0xe9d1, 0x1eec: 0xe9f1, 0x1eed: 0xe9f1, 0x1eee: 0xea11, 0x1eef: 0xea31,
+ 0x1ef0: 0xea51, 0x1ef1: 0xea71, 0x1ef2: 0xea91, 0x1ef3: 0xeab1, 0x1ef4: 0xead1, 0x1ef5: 0x9055,
+ 0x1ef6: 0xeaf1, 0x1ef7: 0x9075, 0x1ef8: 0xeb11, 0x1ef9: 0x9095, 0x1efa: 0xeb31, 0x1efb: 0x90b5,
+ 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0xeb51, 0x1eff: 0xeb71,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb91, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0xebb1,
+ 0x1f06: 0xebd1, 0x1f07: 0xebd1, 0x1f08: 0xebf1, 0x1f09: 0xec11, 0x1f0a: 0xec31, 0x1f0b: 0xec51,
+ 0x1f0c: 0xec71, 0x1f0d: 0x9195, 0x1f0e: 0xec91, 0x1f0f: 0xecb1, 0x1f10: 0xecd1, 0x1f11: 0xecf1,
+ 0x1f12: 0x91b5, 0x1f13: 0xed11, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0xed31, 0x1f17: 0xed51,
+ 0x1f18: 0xed71, 0x1f19: 0xed91, 0x1f1a: 0xedb1, 0x1f1b: 0xedd1, 0x1f1c: 0x9215, 0x1f1d: 0x9235,
+ 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0xedf1, 0x1f21: 0x9275, 0x1f22: 0xee11, 0x1f23: 0xee31,
+ 0x1f24: 0xee51, 0x1f25: 0x9295, 0x1f26: 0xee71, 0x1f27: 0xee91, 0x1f28: 0xeeb1, 0x1f29: 0xeed1,
+ 0x1f2a: 0xeef1, 0x1f2b: 0x92b5, 0x1f2c: 0xef11, 0x1f2d: 0xef31, 0x1f2e: 0xef51, 0x1f2f: 0xef71,
+ 0x1f30: 0xef91, 0x1f31: 0xefb1, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0xefd1, 0x1f35: 0x9315,
+ 0x1f36: 0xeff1, 0x1f37: 0x9335, 0x1f38: 0xf011, 0x1f39: 0xf031, 0x1f3a: 0xf051, 0x1f3b: 0x9355,
+ 0x1f3c: 0x9375, 0x1f3d: 0xf071, 0x1f3e: 0x9395, 0x1f3f: 0xf091,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6d1, 0x1f41: 0xf6f1, 0x1f42: 0xf711, 0x1f43: 0xf731, 0x1f44: 0xf751, 0x1f45: 0x9555,
+ 0x1f46: 0xf771, 0x1f47: 0xf791, 0x1f48: 0xf7b1, 0x1f49: 0xf7d1, 0x1f4a: 0xf7f1, 0x1f4b: 0x9575,
+ 0x1f4c: 0x9595, 0x1f4d: 0xf811, 0x1f4e: 0xf831, 0x1f4f: 0xf851, 0x1f50: 0xf871, 0x1f51: 0xf891,
+ 0x1f52: 0xf8b1, 0x1f53: 0x95b5, 0x1f54: 0xf8d1, 0x1f55: 0xf8f1, 0x1f56: 0xf911, 0x1f57: 0xf931,
+ 0x1f58: 0x95d5, 0x1f59: 0x95f5, 0x1f5a: 0xf951, 0x1f5b: 0xf971, 0x1f5c: 0xf991, 0x1f5d: 0x9615,
+ 0x1f5e: 0xf9b1, 0x1f5f: 0xf9d1, 0x1f60: 0x684d, 0x1f61: 0x9635, 0x1f62: 0xf9f1, 0x1f63: 0xfa11,
+ 0x1f64: 0xfa31, 0x1f65: 0x9655, 0x1f66: 0xfa51, 0x1f67: 0xfa71, 0x1f68: 0xfa91, 0x1f69: 0xfab1,
+ 0x1f6a: 0xfad1, 0x1f6b: 0xfaf1, 0x1f6c: 0xfb11, 0x1f6d: 0x9675, 0x1f6e: 0xfb31, 0x1f6f: 0xfb51,
+ 0x1f70: 0xfb71, 0x1f71: 0x9695, 0x1f72: 0xfb91, 0x1f73: 0xfbb1, 0x1f74: 0xfbd1, 0x1f75: 0xfbf1,
+ 0x1f76: 0x7b6d, 0x1f77: 0x96b5, 0x1f78: 0xfc11, 0x1f79: 0xfc31, 0x1f7a: 0xfc51, 0x1f7b: 0x96d5,
+ 0x1f7c: 0xfc71, 0x1f7d: 0x96f5, 0x1f7e: 0xfc91, 0x1f7f: 0xfc91,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfcb1, 0x1f81: 0x9715, 0x1f82: 0xfcd1, 0x1f83: 0xfcf1, 0x1f84: 0xfd11, 0x1f85: 0xfd31,
+ 0x1f86: 0xfd51, 0x1f87: 0xfd71, 0x1f88: 0xfd91, 0x1f89: 0x9735, 0x1f8a: 0xfdb1, 0x1f8b: 0xfdd1,
+ 0x1f8c: 0xfdf1, 0x1f8d: 0xfe11, 0x1f8e: 0xfe31, 0x1f8f: 0xfe51, 0x1f90: 0x9755, 0x1f91: 0xfe71,
+ 0x1f92: 0x9775, 0x1f93: 0x9795, 0x1f94: 0x97b5, 0x1f95: 0xfe91, 0x1f96: 0xfeb1, 0x1f97: 0xfed1,
+ 0x1f98: 0xfef1, 0x1f99: 0xff11, 0x1f9a: 0xff31, 0x1f9b: 0xff51, 0x1f9c: 0xff71, 0x1f9d: 0x97d5,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0x9b,
+ 0x1b0: 0xd0, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4,
+ 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe0,
+ 0x1c8: 0xe1, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe2,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe5, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe6, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe7,
+ 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef,
+ 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf8, 0x31f: 0xf9,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd,
+ 0x3a8: 0x47, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x100, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x101, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9f, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d,
+ 0x3d0: 0x10e, 0x3d1: 0x9f, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xba, 0x3e6: 0x11a, 0x3e7: 0x11b,
+ 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x5b, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x121, 0x3f1: 0x122, 0x3f2: 0x123, 0x3f3: 0x124, 0x3f4: 0x125, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x127, 0x3fd: 0x128, 0x3fe: 0xba, 0x3ff: 0x129,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0x143, 0x427: 0x144,
+ 0x428: 0x145, 0x429: 0x146, 0x42a: 0x147, 0x42b: 0x148, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x149, 0x431: 0x14a, 0x432: 0x14b, 0x433: 0xba, 0x434: 0x14c, 0x435: 0x14d, 0x436: 0x14e, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14f, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0x150,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x151, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x152, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x153, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x154, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x155, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x148, 0x529: 0x156, 0x52a: 0xba, 0x52b: 0x157, 0x52c: 0x158, 0x52d: 0x159, 0x52e: 0x15a, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x15b, 0x53a: 0x15c, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15d, 0x53e: 0x15e, 0x53f: 0x15f,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x160,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x161, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x162, 0x585: 0x163, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x164, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x165, 0x5b2: 0x166, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x167, 0x5c4: 0x168, 0x5c5: 0x169, 0x5c6: 0x16a, 0x5c7: 0x16b,
+ 0x5c8: 0x9b, 0x5c9: 0x16c, 0x5ca: 0xba, 0x5cb: 0x16d, 0x5cc: 0x9b, 0x5cd: 0x16e, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16f, 0x5e9: 0x170, 0x5ea: 0x171, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x172, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0x173, 0x605: 0x174, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0x175, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x121, 0x621: 0x121, 0x622: 0x121, 0x623: 0x176, 0x624: 0x6f, 0x625: 0x177, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x178, 0x632: 0x179, 0x633: 0xba, 0x634: 0x17a, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x17b, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x17c, 0x641: 0x9b, 0x642: 0x17d, 0x643: 0x17e, 0x644: 0x73, 0x645: 0x74, 0x646: 0x17f, 0x647: 0x180,
+ 0x648: 0x75, 0x649: 0x181, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x182, 0x65c: 0x9b, 0x65d: 0x183, 0x65e: 0x9b, 0x65f: 0x184,
+ 0x660: 0x185, 0x661: 0x186, 0x662: 0x187, 0x663: 0xba, 0x664: 0x188, 0x665: 0x189, 0x666: 0x18a, 0x667: 0x18b,
+ 0x668: 0x9b, 0x669: 0x18c, 0x66a: 0x18d, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x18e, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x18f, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x190, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x191, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x192,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x193, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x194, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x195, 0x841: 0x196, 0x842: 0xba, 0x843: 0xba, 0x844: 0x197, 0x845: 0x197, 0x846: 0x197, 0x847: 0x198,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 284 entries, 568 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x26c, 0x27d, 0x281, 0x28c, 0x290, 0x299, 0x2a1, 0x2a7, 0x2ac, 0x2af, 0x2b3, 0x2b9, 0x2bd, 0x2c1, 0x2c5, 0x2cb, 0x2d3, 0x2da, 0x2e5, 0x2ef, 0x2f3, 0x2f6, 0x2fc, 0x300, 0x302, 0x305, 0x307, 0x30a, 0x314, 0x317, 0x326, 0x32a, 0x32f, 0x332, 0x336, 0x33b, 0x340, 0x346, 0x352, 0x361, 0x367, 0x36b, 0x37a, 0x37f, 0x387, 0x391, 0x39c, 0x3a4, 0x3b5, 0x3be, 0x3ce, 0x3db, 0x3e5, 0x3ea, 0x3f7, 0x3fb, 0x400, 0x402, 0x406, 0x408, 0x40c, 0x415, 0x41b, 0x41f, 0x42f, 0x439, 0x43e, 0x441, 0x447, 0x44e, 0x453, 0x457, 0x45d, 0x462, 0x46b, 0x470, 0x476, 0x47d, 0x484, 0x48b, 0x48f, 0x494, 0x497, 0x49c, 0x4a8, 0x4ae, 0x4b3, 0x4ba, 0x4c2, 0x4c7, 0x4cb, 0x4db, 0x4e2, 0x4e6, 0x4ea, 0x4f1, 0x4f3, 0x4f6, 0x4f9, 0x4fd, 0x506, 0x50a, 0x512, 0x51a, 0x51e, 0x524, 0x52d, 0x539, 0x540, 0x549, 0x553, 0x55a, 0x568, 0x575, 0x582, 0x58b, 0x58f, 0x59f, 0x5a7, 0x5b2, 0x5bb, 0x5c1, 0x5c9, 0x5d2, 0x5dd, 0x5e0, 0x5ec, 0x5f5, 0x5f8, 0x5fd, 0x602, 0x60f, 0x61a, 0x623, 0x62d, 0x630, 0x63a, 0x643, 0x64f, 0x65c, 0x669, 0x677, 0x67e, 0x682, 0x685, 0x68a, 0x68d, 0x692, 0x695, 0x69c, 0x6a3, 0x6a7, 0x6b2, 0x6b5, 0x6b8, 0x6bb, 0x6c1, 0x6c7, 0x6cd, 0x6d0, 0x6d3, 0x6d6, 0x6dd, 0x6e0, 0x6e5, 0x6ef, 0x6f2, 0x6f6, 0x705, 0x711, 0x715, 0x71a, 0x71e, 0x723, 0x727, 0x72c, 0x735, 0x740, 0x746, 0x74c, 0x752, 0x758, 0x761, 0x764, 0x767, 0x76b, 0x76f, 0x773, 0x779, 0x77f, 0x784, 0x787, 0x797, 0x79e, 0x7a1, 0x7a6, 0x7aa, 0x7b0, 0x7b5, 0x7b9, 0x7bf, 0x7c5, 0x7c9, 0x7d2, 0x7d7, 0x7da, 0x7dd, 0x7e1, 0x7e5, 0x7e8, 0x7f8, 0x809, 0x80e, 0x810, 0x812}
+
+// idnaSparseValues: 2069 entries, 8276 bytes
+var idnaSparseValues = [2069]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x059d, lo: 0x90, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x059d, lo: 0xbd, hi: 0xbf},
+ // Block 0x45, offset 0x26c
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x46, offset 0x27d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x281
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x290
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x299
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x2a1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09dd, lo: 0xa9, hi: 0xa9},
+ {value: 0x09fd, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2ac
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2af
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e7e, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e9e, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2c1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c5
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ebd, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x56, offset 0x2da
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x58, offset 0x2ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x59, offset 0x2f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x5a, offset 0x2f6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0ef5, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x2fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0f15, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5c, offset 0x300
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f35, lo: 0x80, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x02},
+ {value: 0x1735, lo: 0x80, hi: 0x8f},
+ {value: 0x1915, lo: 0x90, hi: 0xbf},
+ // Block 0x5e, offset 0x305
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1f15, lo: 0x80, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x60, offset 0x30a
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x61, offset 0x314
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x62, offset 0x317
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a35, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a55, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a75, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a95, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a75, lo: 0xb5, hi: 0xb5},
+ {value: 0x2ab5, lo: 0xb6, hi: 0xb6},
+ {value: 0x2ad5, lo: 0xb7, hi: 0xb7},
+ {value: 0x2af5, lo: 0xb8, hi: 0xb9},
+ {value: 0x2b15, lo: 0xba, hi: 0xbb},
+ {value: 0x2b35, lo: 0xbc, hi: 0xbd},
+ {value: 0x2b15, lo: 0xbe, hi: 0xbf},
+ // Block 0x63, offset 0x326
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x64, offset 0x32a
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x66, offset 0x332
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x67, offset 0x336
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x68, offset 0x33b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x69, offset 0x340
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6a, offset 0x346
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0xe00d, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x83},
+ {value: 0x03f5, lo: 0x84, hi: 0x84},
+ {value: 0x1329, lo: 0x85, hi: 0x85},
+ {value: 0x447d, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x361
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x367
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6e, offset 0x36b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x6f, offset 0x37a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x70, offset 0x37f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x71, offset 0x387
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x391
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x73, offset 0x39c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x74, offset 0x3a4
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x75, offset 0x3b5
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3be
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3ce
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x78, offset 0x3db
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x449d, lo: 0x9c, hi: 0x9c},
+ {value: 0x44b5, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x44cd, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3e5
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44ed, lo: 0x80, hi: 0x8f},
+ {value: 0x450d, lo: 0x90, hi: 0x9f},
+ {value: 0x452d, lo: 0xa0, hi: 0xaf},
+ {value: 0x450d, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3ea
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7b, offset 0x3f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3fb
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7d, offset 0x400
+ {value: 0x0020, lo: 0x01},
+ {value: 0x454d, lo: 0x80, hi: 0xbf},
+ // Block 0x7e, offset 0x402
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d4d, lo: 0x80, hi: 0x94},
+ {value: 0x4b0d, lo: 0x95, hi: 0x95},
+ {value: 0x4fed, lo: 0x96, hi: 0xbf},
+ // Block 0x7f, offset 0x406
+ {value: 0x0020, lo: 0x01},
+ {value: 0x552d, lo: 0x80, hi: 0xbf},
+ // Block 0x80, offset 0x408
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5d2d, lo: 0x80, hi: 0x84},
+ {value: 0x568d, lo: 0x85, hi: 0x85},
+ {value: 0x5dcd, lo: 0x86, hi: 0xbf},
+ // Block 0x81, offset 0x40c
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b8d, lo: 0x80, hi: 0x8f},
+ {value: 0x6d4d, lo: 0x90, hi: 0x90},
+ {value: 0x6d8d, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70ed, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x710d, lo: 0xb0, hi: 0xbf},
+ // Block 0x82, offset 0x415
+ {value: 0x0020, lo: 0x05},
+ {value: 0x730d, lo: 0x80, hi: 0xad},
+ {value: 0x656d, lo: 0xae, hi: 0xae},
+ {value: 0x78cd, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f8d, lo: 0xb6, hi: 0xb6},
+ {value: 0x79ad, lo: 0xb7, hi: 0xbf},
+ // Block 0x83, offset 0x41b
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x84, offset 0x41f
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x85, offset 0x42f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x86, offset 0x439
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x87, offset 0x43e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x88, offset 0x441
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x89, offset 0x447
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8a, offset 0x44e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8b, offset 0x453
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8c, offset 0x457
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8d, offset 0x45d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8e, offset 0x462
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8f, offset 0x46b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x90, offset 0x470
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x91, offset 0x476
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8b0d, lo: 0x98, hi: 0x9f},
+ {value: 0x8b25, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x92, offset 0x47d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8b25, lo: 0xb0, hi: 0xb7},
+ {value: 0x8b0d, lo: 0xb8, hi: 0xbf},
+ // Block 0x93, offset 0x484
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x94, offset 0x48b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x95, offset 0x48f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x494
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x97, offset 0x497
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x98, offset 0x49c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x99, offset 0x4a8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9b, offset 0x4b3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9c, offset 0x4ba
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9d, offset 0x4c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9e, offset 0x4c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4db
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa1, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa2, offset 0x4e6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa3, offset 0x4ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa4, offset 0x4f1
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa5, offset 0x4f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa6, offset 0x4f6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa7, offset 0x4f9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa8, offset 0x4fd
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x506
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xab, offset 0x512
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xac, offset 0x51a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xad, offset 0x51e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x524
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x539
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x540
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x549
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x553
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x568
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x575
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x582
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x58b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xba, offset 0x59f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x5a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5b2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5c1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5d2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc1, offset 0x5dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5e0
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5ec
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5f8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5fd
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xc7, offset 0x602
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x99},
+ {value: 0x3308, lo: 0x9a, hi: 0x9b},
+ {value: 0x3008, lo: 0x9c, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xbf},
+ // Block 0xc8, offset 0x60f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc9, offset 0x61a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xca, offset 0x623
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xcb, offset 0x62d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xcc, offset 0x630
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcd, offset 0x63a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xce, offset 0x643
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xd0, offset 0x65c
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xd1, offset 0x669
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd2, offset 0x677
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd3, offset 0x67e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xd4, offset 0x682
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd5, offset 0x685
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd6, offset 0x68a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd7, offset 0x68d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0340, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd8, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd9, offset 0x695
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xda, offset 0x69c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x6a3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xdc, offset 0x6a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xdd, offset 0x6b2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xde, offset 0x6b5
+ {value: 0x0000, lo: 0x02},
+ {value: 0xe105, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6b8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xe0, offset 0x6bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbf},
+ // Block 0xe1, offset 0x6c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe2, offset 0x6c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe3, offset 0x6cd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xe4, offset 0x6d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe5, offset 0x6d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe6, offset 0x6d6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xa3},
+ {value: 0x0008, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe7, offset 0x6dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe8, offset 0x6e0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe9, offset 0x6e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xea, offset 0x6ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6f6
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xed, offset 0x705
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xee, offset 0x711
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xef, offset 0x715
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xf0, offset 0x71a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf1, offset 0x71e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xf2, offset 0x723
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf3, offset 0x727
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf4, offset 0x72c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf5, offset 0x735
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf6, offset 0x740
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf7, offset 0x746
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xf8, offset 0x74c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xf9, offset 0x752
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xfa, offset 0x758
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0b08, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x761
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xfc, offset 0x764
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfd, offset 0x767
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0818, lo: 0x81, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xfe, offset 0x76b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xff, offset 0x76f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x100, offset 0x773
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x101, offset 0x779
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x102, offset 0x77f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1d9, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0x103, offset 0x784
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0x104, offset 0x787
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc801, lo: 0x80, hi: 0x80},
+ {value: 0xc851, lo: 0x81, hi: 0x81},
+ {value: 0xc8a1, lo: 0x82, hi: 0x82},
+ {value: 0xc8f1, lo: 0x83, hi: 0x83},
+ {value: 0xc941, lo: 0x84, hi: 0x84},
+ {value: 0xc991, lo: 0x85, hi: 0x85},
+ {value: 0xc9e1, lo: 0x86, hi: 0x86},
+ {value: 0xca31, lo: 0x87, hi: 0x87},
+ {value: 0xca81, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcad1, lo: 0x90, hi: 0x90},
+ {value: 0xcaf1, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0x105, offset 0x797
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x106, offset 0x79e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x107, offset 0x7a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x108, offset 0x7a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x109, offset 0x7aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x10a, offset 0x7b0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x10b, offset 0x7b5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10c, offset 0x7b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0x10d, offset 0x7bf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xa4},
+ {value: 0x0018, lo: 0xa5, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xbf},
+ // Block 0x10e, offset 0x7c5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10f, offset 0x7c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x110, offset 0x7d2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x111, offset 0x7d7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x112, offset 0x7da
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x113, offset 0x7dd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x114, offset 0x7e1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x115, offset 0x7e5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x116, offset 0x7e8
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xded1, lo: 0x80, hi: 0x89},
+ {value: 0x8e35, lo: 0x8a, hi: 0x8a},
+ {value: 0xe011, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e55, lo: 0x9d, hi: 0x9d},
+ {value: 0xe251, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e75, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2f1, lo: 0xa4, hi: 0xab},
+ {value: 0x7f0d, lo: 0xac, hi: 0xac},
+ {value: 0xe3f1, lo: 0xad, hi: 0xaf},
+ {value: 0x8e95, lo: 0xb0, hi: 0xb0},
+ {value: 0xe451, lo: 0xb1, hi: 0xb6},
+ {value: 0x8eb5, lo: 0xb7, hi: 0xb9},
+ {value: 0xe511, lo: 0xba, hi: 0xba},
+ {value: 0x8f15, lo: 0xbb, hi: 0xbb},
+ {value: 0xe531, lo: 0xbc, hi: 0xbf},
+ // Block 0x117, offset 0x7f8
+ {value: 0x0020, lo: 0x10},
+ {value: 0x93b5, lo: 0x80, hi: 0x80},
+ {value: 0xf0b1, lo: 0x81, hi: 0x86},
+ {value: 0x93d5, lo: 0x87, hi: 0x8a},
+ {value: 0xda11, lo: 0x8b, hi: 0x8b},
+ {value: 0xf171, lo: 0x8c, hi: 0x96},
+ {value: 0x9455, lo: 0x97, hi: 0x97},
+ {value: 0xf2d1, lo: 0x98, hi: 0xa3},
+ {value: 0x9475, lo: 0xa4, hi: 0xa6},
+ {value: 0xf451, lo: 0xa7, hi: 0xaa},
+ {value: 0x94d5, lo: 0xab, hi: 0xab},
+ {value: 0xf4d1, lo: 0xac, hi: 0xac},
+ {value: 0x94f5, lo: 0xad, hi: 0xad},
+ {value: 0xf4f1, lo: 0xae, hi: 0xaf},
+ {value: 0x9515, lo: 0xb0, hi: 0xb1},
+ {value: 0xf531, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x118, offset 0x809
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x119, offset 0x80e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x11a, offset 0x810
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x11b, offset 0x812
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42780 bytes (41KiB); checksum: 29936AB9
diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go
new file mode 100644
index 0000000..2fb768e
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables13.0.0.go
@@ -0,0 +1,4959 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build go1.16 && !go1.21
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "13.0.0"
+
+var mappings string = "" + // Size: 6539 bytes
+ " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" +
+ "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" +
+ "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" +
+ "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" +
+ "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" +
+ "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" +
+ "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" +
+ ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. ゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" +
+ "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" +
+ ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" +
+ "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" +
+ "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" +
+ "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" +
+ "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" +
+ "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" +
+ "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" +
+ "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" +
+ "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" +
+ "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" +
+ "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" +
+ "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" +
+ "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" +
+ "ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" +
+ "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" +
+ "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" +
+ "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" +
+ "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" +
+ "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" +
+ "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" +
+ "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" +
+ "ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" +
+ "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" +
+ "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" +
+ "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱" +
+ "𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκλμνξοστυψ∇∂ϝٮڡٯ0,1,2,3,4,5,6,7,8,9,(a)(b)(c" +
+ ")(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y)(z)〔s" +
+ "〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申割営配〔" +
+ "本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉卑博即卽" +
+ "卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢㠯巽帨帽" +
+ "幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最暜肭䏙朗" +
+ "望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸瑇瑜瑱璅" +
+ "瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦若茝荣莭" +
+ "茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷䧦雃嶲霣" +
+ "䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻"
+
+var mappingIndex = []uint16{ // 1650 elements
+ // Entry 0 - 3F
+ 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a,
+ 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024,
+ 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 0x0035, 0x0036,
+ 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048,
+ 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e,
+ 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086,
+ 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6,
+ 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6,
+ // Entry 40 - 7F
+ 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116,
+ 0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c,
+ 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174,
+ 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182,
+ 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c,
+ 0x018d, 0x018e, 0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199,
+ 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8,
+ 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6,
+ // Entry 80 - BF
+ 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6,
+ 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6,
+ 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5,
+ 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5,
+ 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211,
+ 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239,
+ 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261,
+ 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287,
+ // Entry C0 - FF
+ 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa,
+ 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8,
+ 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8,
+ 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f,
+ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+ 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326,
+ 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340,
+ 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368,
+ // Entry 100 - 13F
+ 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386,
+ 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1,
+ 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1,
+ 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db,
+ 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401,
+ 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417,
+ 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441,
+ 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469,
+ // Entry 140 - 17F
+ 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491,
+ 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc,
+ 0x04c4, 0x04c9, 0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7,
+ 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f,
+ 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537,
+ 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f,
+ 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e,
+ 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e,
+ // Entry 180 - 1BF
+ 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6,
+ 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6,
+ 0x05b8, 0x05ba, 0x05bc, 0x05be, 0x05c0, 0x05c4, 0x05c8, 0x05cc,
+ 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee,
+ 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a,
+ 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e,
+ 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf,
+ 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738,
+ // Entry 1C0 - 1FF
+ 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c,
+ 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce,
+ 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822,
+ 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873,
+ 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be,
+ 0x08c7, 0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f,
+ 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d,
+ 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997,
+ // Entry 200 - 23F
+ 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9,
+ 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1,
+ 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00,
+ 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a,
+ 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c,
+ 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c,
+ 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f,
+ 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 0x0a78, 0x0a7a, 0x0a7c, 0x0a7e,
+ // Entry 240 - 27F
+ 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e,
+ 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2,
+ 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2,
+ 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2,
+ 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6,
+ 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e,
+ 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46,
+ 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c,
+ // Entry 280 - 2BF
+ 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a,
+ 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92,
+ 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8,
+ 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8,
+ 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8,
+ 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08,
+ 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28,
+ 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e,
+ // Entry 2C0 - 2FF
+ 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e,
+ 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e,
+ 0x0c60, 0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e,
+ 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e,
+ 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a,
+ 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8,
+ 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8,
+ 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8,
+ // Entry 300 - 33F
+ 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18,
+ 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38,
+ 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58,
+ 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78,
+ 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 0x0d8c, 0x0d90, 0x0d94, 0x0d98,
+ 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8,
+ 0x0dbc, 0x0dc0, 0x0dc4, 0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8,
+ 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8,
+ // Entry 340 - 37F
+ 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18,
+ 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e,
+ 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e,
+ 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e,
+ 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e,
+ 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2,
+ 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4,
+ 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04,
+ // Entry 380 - 3BF
+ 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c, 0x0f20, 0x0f24,
+ 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a,
+ 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a,
+ 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa,
+ 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda,
+ 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a,
+ 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a,
+ 0x1040, 0x1046, 0x104c, 0x1052, 0x1058, 0x105e, 0x1064, 0x106a,
+ // Entry 3C0 - 3FF
+ 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a,
+ 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca,
+ 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa,
+ 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a,
+ 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a,
+ 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190,
+ 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee,
+ 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6,
+ // Entry 400 - 43F
+ 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe,
+ 0x11ff, 0x1200, 0x1201, 0x1205, 0x1209, 0x120d, 0x1211, 0x1215,
+ 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227,
+ 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237,
+ 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247,
+ 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257,
+ 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f,
+ 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279,
+ // Entry 440 - 47F
+ 0x127b, 0x127d, 0x127f, 0x1287, 0x128f, 0x129b, 0x12a7, 0x12b3,
+ 0x12bf, 0x12cb, 0x12d3, 0x12db, 0x12e7, 0x12f3, 0x12ff, 0x130b,
+ 0x130d, 0x130f, 0x1311, 0x1313, 0x1315, 0x1317, 0x1319, 0x131b,
+ 0x131d, 0x131f, 0x1321, 0x1323, 0x1325, 0x1327, 0x1329, 0x132b,
+ 0x132e, 0x1331, 0x1333, 0x1335, 0x1337, 0x1339, 0x133b, 0x133d,
+ 0x133f, 0x1341, 0x1343, 0x1345, 0x1347, 0x1349, 0x134b, 0x134d,
+ 0x1350, 0x1353, 0x1356, 0x1359, 0x135c, 0x135f, 0x1362, 0x1365,
+ 0x1368, 0x136b, 0x136e, 0x1371, 0x1374, 0x1377, 0x137a, 0x137d,
+ // Entry 480 - 4BF
+ 0x1380, 0x1383, 0x1386, 0x1389, 0x138c, 0x138f, 0x1392, 0x1395,
+ 0x1398, 0x139b, 0x13a2, 0x13a4, 0x13a6, 0x13a8, 0x13ab, 0x13ad,
+ 0x13af, 0x13b1, 0x13b3, 0x13b5, 0x13bb, 0x13c1, 0x13c4, 0x13c7,
+ 0x13ca, 0x13cd, 0x13d0, 0x13d3, 0x13d6, 0x13d9, 0x13dc, 0x13df,
+ 0x13e2, 0x13e5, 0x13e8, 0x13eb, 0x13ee, 0x13f1, 0x13f4, 0x13f7,
+ 0x13fa, 0x13fd, 0x1400, 0x1403, 0x1406, 0x1409, 0x140c, 0x140f,
+ 0x1412, 0x1415, 0x1418, 0x141b, 0x141e, 0x1421, 0x1424, 0x1427,
+ 0x142a, 0x142d, 0x1430, 0x1433, 0x1436, 0x1439, 0x143c, 0x143f,
+ // Entry 4C0 - 4FF
+ 0x1442, 0x1445, 0x1448, 0x1451, 0x145a, 0x1463, 0x146c, 0x1475,
+ 0x147e, 0x1487, 0x1490, 0x1499, 0x149c, 0x149f, 0x14a2, 0x14a5,
+ 0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7, 0x14ba, 0x14bd,
+ 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf, 0x14d2, 0x14d5,
+ 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7, 0x14ea, 0x14ed,
+ 0x14f0, 0x14f3, 0x14f6, 0x14f9, 0x14fc, 0x14ff, 0x1502, 0x1505,
+ 0x1508, 0x150b, 0x150e, 0x1511, 0x1514, 0x1517, 0x151a, 0x151d,
+ 0x1520, 0x1523, 0x1526, 0x1529, 0x152c, 0x152f, 0x1532, 0x1535,
+ // Entry 500 - 53F
+ 0x1538, 0x153b, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d,
+ 0x1550, 0x1553, 0x1556, 0x1559, 0x155c, 0x155f, 0x1562, 0x1565,
+ 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d,
+ 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595,
+ 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad,
+ 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5,
+ 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd,
+ 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5,
+ // Entry 540 - 57F
+ 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d,
+ 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625,
+ 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d,
+ 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655,
+ 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d,
+ 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685,
+ 0x1688, 0x168b, 0x168e, 0x1691, 0x1694, 0x1697, 0x169a, 0x169d,
+ 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5,
+ // Entry 580 - 5BF
+ 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd,
+ 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5,
+ 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd,
+ 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715,
+ 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d,
+ 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745,
+ 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d,
+ 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775,
+ // Entry 5C0 - 5FF
+ 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d,
+ 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5,
+ 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd,
+ 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 0x17d2, 0x17d5,
+ 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed,
+ 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805,
+ 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d,
+ 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835,
+ // Entry 600 - 63F
+ 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d,
+ 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865,
+ 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d,
+ 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895,
+ 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad,
+ 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5,
+ 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd,
+ 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5,
+ // Entry 640 - 67F
+ 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d,
+ 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925,
+ 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d,
+ 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 0x194f, 0x1952, 0x1955,
+ 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d,
+ 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 0x1985,
+ 0x1988, 0x198b,
+} // Size: 3324 bytes
+
+var xorData string = "" + // Size: 4862 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" +
+ "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" +
+ "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" +
+ "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" +
+ "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" +
+ "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" +
+ "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" +
+ "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" +
+ "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" +
+ "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" +
+ "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" +
+ "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" +
+ "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" +
+ "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" +
+ "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" +
+ "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" +
+ "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" +
+ "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" +
+ "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" +
+ "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" +
+ "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" +
+ "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" +
+ "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" +
+ "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" +
+ "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" +
+ "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" +
+ "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" +
+ "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" +
+ "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" +
+ "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" +
+ "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" +
+ "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" +
+ "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" +
+ "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" +
+ "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" +
+ "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" +
+ "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" +
+ "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" +
+ "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" +
+ "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" +
+ "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" +
+ "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" +
+ "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" +
+ "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" +
+ "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" +
+ "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" +
+ "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" +
+ "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" +
+ "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." +
+ "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" +
+ "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" +
+ "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" +
+ "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" +
+ "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" +
+ "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" +
+ "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" +
+ "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03" +
+ "\x09\x0c\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06" +
+ "!3\x03\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05" +
+ "\x03\x07<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" +
+ "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" +
+ "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" +
+ "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" +
+ "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" +
+ "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" +
+ ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" +
+ "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" +
+ "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" +
+ "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" +
+ "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" +
+ "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" +
+ "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" +
+ "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" +
+ "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" +
+ "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" +
+ "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" +
+ "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" +
+ ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 30196 bytes (29.49 KiB). Checksum: e2ae95a945f04016.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 126:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 126
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 128 blocks, 8192 entries, 16384 bytes
+// The third block is the zero block.
+var idnaValues = [8192]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018,
+ 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018,
+ 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091,
+ 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139,
+ 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08,
+ 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x0159, 0x599: 0x0161, 0x59a: 0x0169, 0x59b: 0x0171, 0x59c: 0x0179, 0x59d: 0x0181,
+ 0x59e: 0x0189, 0x59f: 0x0191, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0199, 0x61d: 0x01a1,
+ 0x61e: 0x0040, 0x61f: 0x01a9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x01b1, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x01b9, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x01c1, 0x69a: 0x01c9, 0x69b: 0x01d1, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x01d9, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x3308, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x01e1, 0x79d: 0x01e9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x01f9, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0211, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0219, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0221, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0229,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0231, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0239,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0241, 0x974: 0x3308, 0x975: 0x0249,
+ 0x976: 0x0251, 0x977: 0x0259, 0x978: 0x0261, 0x979: 0x0269, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0271, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0279, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0281,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0289, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0291, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0299, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x02a1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0019, 0x9ed: 0x02e1, 0x9ee: 0x02e9, 0x9ef: 0x0008,
+ 0x9f0: 0x02f1, 0x9f1: 0x02f9, 0x9f2: 0x0301, 0x9f3: 0x0309, 0x9f4: 0x00a9, 0x9f5: 0x0311,
+ 0x9f6: 0x00b1, 0x9f7: 0x0319, 0x9f8: 0x0101, 0x9f9: 0x0321, 0x9fa: 0x0329, 0x9fb: 0x0008,
+ 0x9fc: 0x0051, 0x9fd: 0x0331, 0x9fe: 0x0339, 0x9ff: 0x00b9,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0341, 0xa01: 0x0349, 0xa02: 0x00c1, 0xa03: 0x0019, 0xa04: 0x0351, 0xa05: 0x0359,
+ 0xa06: 0x05b5, 0xa07: 0x02e9, 0xa08: 0x02f1, 0xa09: 0x02f9, 0xa0a: 0x0361, 0xa0b: 0x0369,
+ 0xa0c: 0x0371, 0xa0d: 0x0309, 0xa0e: 0x0008, 0xa0f: 0x0319, 0xa10: 0x0321, 0xa11: 0x0379,
+ 0xa12: 0x0051, 0xa13: 0x0381, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0339, 0xa17: 0x0341,
+ 0xa18: 0x0349, 0xa19: 0x05b5, 0xa1a: 0x0389, 0xa1b: 0x0391, 0xa1c: 0x05e5, 0xa1d: 0x0399,
+ 0xa1e: 0x03a1, 0xa1f: 0x03a9, 0xa20: 0x03b1, 0xa21: 0x03b9, 0xa22: 0x0311, 0xa23: 0x00b9,
+ 0xa24: 0x0349, 0xa25: 0x0391, 0xa26: 0x0399, 0xa27: 0x03a1, 0xa28: 0x03c1, 0xa29: 0x03b1,
+ 0xa2a: 0x03b9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x03c9, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x03d1, 0xa5c: 0x03d9, 0xa5d: 0x03e1,
+ 0xa5e: 0x03e9, 0xa5f: 0x0371, 0xa60: 0x03f1, 0xa61: 0x03f9, 0xa62: 0x0401, 0xa63: 0x0409,
+ 0xa64: 0x0411, 0xa65: 0x0419, 0xa66: 0x0421, 0xa67: 0x05fd, 0xa68: 0x0429, 0xa69: 0x0431,
+ 0xa6a: 0xe17d, 0xa6b: 0x0439, 0xa6c: 0x0441, 0xa6d: 0x0449, 0xa6e: 0x0451, 0xa6f: 0x0459,
+ 0xa70: 0x0461, 0xa71: 0x0469, 0xa72: 0x0471, 0xa73: 0x0479, 0xa74: 0x0481, 0xa75: 0x0489,
+ 0xa76: 0x0491, 0xa77: 0x0499, 0xa78: 0x0615, 0xa79: 0x04a1, 0xa7a: 0x04a9, 0xa7b: 0x04b1,
+ 0xa7c: 0x04b9, 0xa7d: 0x04c1, 0xa7e: 0x04c9, 0xa7f: 0x04d1,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x04d9, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x04e1, 0xb32: 0x0008, 0xb33: 0x04e9, 0xb34: 0x0008, 0xb35: 0x04f1,
+ 0xb36: 0x0008, 0xb37: 0x04f9, 0xb38: 0x0008, 0xb39: 0x0501, 0xb3a: 0x0008, 0xb3b: 0x0509,
+ 0xb3c: 0x0008, 0xb3d: 0x0511, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x0519, 0xb41: 0x0521, 0xb42: 0x0529, 0xb43: 0x0531, 0xb44: 0x0539, 0xb45: 0x0541,
+ 0xb46: 0x0549, 0xb47: 0x0551, 0xb48: 0x0519, 0xb49: 0x0521, 0xb4a: 0x0529, 0xb4b: 0x0531,
+ 0xb4c: 0x0539, 0xb4d: 0x0541, 0xb4e: 0x0549, 0xb4f: 0x0551, 0xb50: 0x0559, 0xb51: 0x0561,
+ 0xb52: 0x0569, 0xb53: 0x0571, 0xb54: 0x0579, 0xb55: 0x0581, 0xb56: 0x0589, 0xb57: 0x0591,
+ 0xb58: 0x0559, 0xb59: 0x0561, 0xb5a: 0x0569, 0xb5b: 0x0571, 0xb5c: 0x0579, 0xb5d: 0x0581,
+ 0xb5e: 0x0589, 0xb5f: 0x0591, 0xb60: 0x0599, 0xb61: 0x05a1, 0xb62: 0x05a9, 0xb63: 0x05b1,
+ 0xb64: 0x05b9, 0xb65: 0x05c1, 0xb66: 0x05c9, 0xb67: 0x05d1, 0xb68: 0x0599, 0xb69: 0x05a1,
+ 0xb6a: 0x05a9, 0xb6b: 0x05b1, 0xb6c: 0x05b9, 0xb6d: 0x05c1, 0xb6e: 0x05c9, 0xb6f: 0x05d1,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x05d9, 0xb73: 0x05e1, 0xb74: 0x05e9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x05f1, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x04e1,
+ 0xb7c: 0x05e1, 0xb7d: 0x067e, 0xb7e: 0x05f9, 0xb7f: 0x069e,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06be, 0xb81: 0x0602, 0xb82: 0x0609, 0xb83: 0x0611, 0xb84: 0x0619, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x0621, 0xb88: 0x06dd, 0xb89: 0x04e9, 0xb8a: 0x06f5, 0xb8b: 0x04f1,
+ 0xb8c: 0x0611, 0xb8d: 0x062a, 0xb8e: 0x0632, 0xb8f: 0x063a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x0641, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x04f9, 0xb9c: 0x0040, 0xb9d: 0x064a,
+ 0xb9e: 0x0652, 0xb9f: 0x065a, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x0661,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x0725, 0xbab: 0x0509, 0xbac: 0xe04d, 0xbad: 0x066a, 0xbae: 0x012a, 0xbaf: 0x0672,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x0679, 0xbb3: 0x0681, 0xbb4: 0x0689, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x0691, 0xbb8: 0x073d, 0xbb9: 0x0501, 0xbba: 0x0515, 0xbbb: 0x0511,
+ 0xbbc: 0x0681, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0699, 0xbf4: 0x06a1, 0xbf5: 0x0018,
+ 0xbf6: 0x06a9, 0xbf7: 0x06b1, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x06ba, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x06c2, 0xc08: 0x06ca, 0xc09: 0x06d2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x06d9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x06e1, 0xc31: 0x0311, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x06e9, 0xc35: 0x06f1,
+ 0xc36: 0x06f9, 0xc37: 0x0701, 0xc38: 0x0709, 0xc39: 0x0711, 0xc3a: 0x071a, 0xc3b: 0x07d5,
+ 0xc3c: 0x0722, 0xc3d: 0x072a, 0xc3e: 0x0732, 0xc3f: 0x0329,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x06e1, 0xc41: 0x0049, 0xc42: 0x0029, 0xc43: 0x0031, 0xc44: 0x06e9, 0xc45: 0x06f1,
+ 0xc46: 0x06f9, 0xc47: 0x0701, 0xc48: 0x0709, 0xc49: 0x0711, 0xc4a: 0x071a, 0xc4b: 0x07ed,
+ 0xc4c: 0x0722, 0xc4d: 0x072a, 0xc4e: 0x0732, 0xc4f: 0x0040, 0xc50: 0x0019, 0xc51: 0x02f9,
+ 0xc52: 0x0051, 0xc53: 0x0109, 0xc54: 0x0361, 0xc55: 0x00a9, 0xc56: 0x0319, 0xc57: 0x0101,
+ 0xc58: 0x0321, 0xc59: 0x0329, 0xc5a: 0x0339, 0xc5b: 0x0089, 0xc5c: 0x0341, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x0739, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x03d9, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866,
+ 0xc86: 0x0886, 0xc87: 0x0369, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0309, 0xc8b: 0x00a9,
+ 0xc8c: 0x00a9, 0xc8d: 0x00a9, 0xc8e: 0x00a9, 0xc8f: 0x0741, 0xc90: 0x0311, 0xc91: 0x0311,
+ 0xc92: 0x0101, 0xc93: 0x0101, 0xc94: 0x0018, 0xc95: 0x0329, 0xc96: 0x0749, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0339, 0xc9a: 0x0751, 0xc9b: 0x00b9, 0xc9c: 0x00b9, 0xc9d: 0x00b9,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x0759, 0xca1: 0x08c5, 0xca2: 0x0761, 0xca3: 0x0018,
+ 0xca4: 0x04b1, 0xca5: 0x0018, 0xca6: 0x0769, 0xca7: 0x0018, 0xca8: 0x04b1, 0xca9: 0x0018,
+ 0xcaa: 0x0319, 0xcab: 0x0771, 0xcac: 0x02e9, 0xcad: 0x03d9, 0xcae: 0x0018, 0xcaf: 0x02f9,
+ 0xcb0: 0x02f9, 0xcb1: 0x03f1, 0xcb2: 0x0040, 0xcb3: 0x0321, 0xcb4: 0x0051, 0xcb5: 0x0779,
+ 0xcb6: 0x0781, 0xcb7: 0x0789, 0xcb8: 0x0791, 0xcb9: 0x0311, 0xcba: 0x0018, 0xcbb: 0x08e5,
+ 0xcbc: 0x0799, 0xcbd: 0x03a1, 0xcbe: 0x03a1, 0xcbf: 0x0799,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x02f1,
+ 0xcc6: 0x02f1, 0xcc7: 0x02f9, 0xcc8: 0x0311, 0xcc9: 0x00b1, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x07a1, 0xcd1: 0x07a9,
+ 0xcd2: 0x07b1, 0xcd3: 0x07b9, 0xcd4: 0x07c1, 0xcd5: 0x07c9, 0xcd6: 0x07d1, 0xcd7: 0x07d9,
+ 0xcd8: 0x07e1, 0xcd9: 0x07e9, 0xcda: 0x07f1, 0xcdb: 0x07f9, 0xcdc: 0x0801, 0xcdd: 0x0809,
+ 0xcde: 0x0811, 0xcdf: 0x0819, 0xce0: 0x0311, 0xce1: 0x0821, 0xce2: 0x091d, 0xce3: 0x0829,
+ 0xce4: 0x0391, 0xce5: 0x0831, 0xce6: 0x093d, 0xce7: 0x0839, 0xce8: 0x0841, 0xce9: 0x0109,
+ 0xcea: 0x0849, 0xceb: 0x095d, 0xcec: 0x0101, 0xced: 0x03d9, 0xcee: 0x02f1, 0xcef: 0x0321,
+ 0xcf0: 0x0311, 0xcf1: 0x0821, 0xcf2: 0x097d, 0xcf3: 0x0829, 0xcf4: 0x0391, 0xcf5: 0x0831,
+ 0xcf6: 0x099d, 0xcf7: 0x0839, 0xcf8: 0x0841, 0xcf9: 0x0109, 0xcfa: 0x0849, 0xcfb: 0x09bd,
+ 0xcfc: 0x0101, 0xcfd: 0x03d9, 0xcfe: 0x02f1, 0xcff: 0x0321,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x0049, 0xd21: 0x0029, 0xd22: 0x0031, 0xd23: 0x06e9,
+ 0xd24: 0x06f1, 0xd25: 0x06f9, 0xd26: 0x0701, 0xd27: 0x0709, 0xd28: 0x0711, 0xd29: 0x0879,
+ 0xd2a: 0x0881, 0xd2b: 0x0889, 0xd2c: 0x0891, 0xd2d: 0x0899, 0xd2e: 0x08a1, 0xd2f: 0x08a9,
+ 0xd30: 0x08b1, 0xd31: 0x08b9, 0xd32: 0x08c1, 0xd33: 0x08c9, 0xd34: 0x0a1e, 0xd35: 0x0a3e,
+ 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe,
+ 0xd3c: 0x0b1e, 0xd3d: 0x08d2, 0xd3e: 0x08da, 0xd3f: 0x08e2,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x08ea, 0xd41: 0x08f2, 0xd42: 0x08fa, 0xd43: 0x0902, 0xd44: 0x090a, 0xd45: 0x0912,
+ 0xd46: 0x091a, 0xd47: 0x0922, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e,
+ 0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e,
+ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde,
+ 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e,
+ 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e,
+ 0xd76: 0x0019, 0xd77: 0x02e9, 0xd78: 0x03d9, 0xd79: 0x02f1, 0xd7a: 0x02f9, 0xd7b: 0x03f1,
+ 0xd7c: 0x0309, 0xd7d: 0x00a9, 0xd7e: 0x0311, 0xd7f: 0x00b1,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0319, 0xd81: 0x0101, 0xd82: 0x0321, 0xd83: 0x0329, 0xd84: 0x0051, 0xd85: 0x0339,
+ 0xd86: 0x0751, 0xd87: 0x00b9, 0xd88: 0x0089, 0xd89: 0x0341, 0xd8a: 0x0349, 0xd8b: 0x0391,
+ 0xd8c: 0x00c1, 0xd8d: 0x0109, 0xd8e: 0x00c9, 0xd8f: 0x04b1, 0xd90: 0x0019, 0xd91: 0x02e9,
+ 0xd92: 0x03d9, 0xd93: 0x02f1, 0xd94: 0x02f9, 0xd95: 0x03f1, 0xd96: 0x0309, 0xd97: 0x00a9,
+ 0xd98: 0x0311, 0xd99: 0x00b1, 0xd9a: 0x0319, 0xd9b: 0x0101, 0xd9c: 0x0321, 0xd9d: 0x0329,
+ 0xd9e: 0x0051, 0xd9f: 0x0339, 0xda0: 0x0751, 0xda1: 0x00b9, 0xda2: 0x0089, 0xda3: 0x0341,
+ 0xda4: 0x0349, 0xda5: 0x0391, 0xda6: 0x00c1, 0xda7: 0x0109, 0xda8: 0x00c9, 0xda9: 0x04b1,
+ 0xdaa: 0x06e1, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x0941, 0xde3: 0x0ed5,
+ 0xde4: 0x0949, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0359, 0xdee: 0x0441, 0xdef: 0x0351,
+ 0xdf0: 0x03d1, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x00b1, 0xdfd: 0x0391, 0xdfe: 0x0951, 0xdff: 0x0959,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5,
+ 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875,
+ 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935,
+ 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x0961, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5,
+ 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15,
+ 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75,
+ 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95,
+ 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75,
+ 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5,
+ 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55,
+ 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15,
+ 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95,
+ 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5,
+ 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5,
+ 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5,
+ 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5,
+ 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275,
+ 0xf1e: 0x3295, 0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0008,
+ 0xf3c: 0x0008, 0xf3d: 0x0008, 0xf3e: 0x0008, 0xf3f: 0x0008,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x0b82, 0xf41: 0x0b8a, 0xf42: 0x0b92, 0xf43: 0x0b9a, 0xf44: 0x32d5, 0xf45: 0x32f5,
+ 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x0ba1,
+ 0xf52: 0x0ba9, 0xf53: 0x0bb1, 0xf54: 0x0bb9, 0xf55: 0x0bc1, 0xf56: 0x0bc9, 0xf57: 0x0bd1,
+ 0xf58: 0x0bd9, 0xf59: 0x0be1, 0xf5a: 0x0be9, 0xf5b: 0x0bf1, 0xf5c: 0x0bf9, 0xf5d: 0x0c01,
+ 0xf5e: 0x0c09, 0xf5f: 0x0c11, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5,
+ 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475,
+ 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535,
+ 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5,
+ 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5,
+ 0xf7c: 0x0c19, 0xf7d: 0x0c21, 0xf7e: 0x36d5, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795,
+ 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855,
+ 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915,
+ 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5,
+ 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95,
+ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55,
+ 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5,
+ 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95,
+ 0xfb0: 0x3cb5, 0xfb1: 0x0c29, 0xfb2: 0x0c31, 0xfb3: 0x0c39, 0xfb4: 0x0c41, 0xfb5: 0x0c49,
+ 0xfb6: 0x0c51, 0xfb7: 0x0c59, 0xfb8: 0x0c61, 0xfb9: 0x0c69, 0xfba: 0x0c71, 0xfbb: 0x0c79,
+ 0xfbc: 0x0c81, 0xfbd: 0x0c89, 0xfbe: 0x0c91, 0xfbf: 0x0c99,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x0ca1, 0xfc1: 0x0ca9, 0xfc2: 0x0cb1, 0xfc3: 0x0cb9, 0xfc4: 0x0cc1, 0xfc5: 0x0cc9,
+ 0xfc6: 0x0cd1, 0xfc7: 0x0cd9, 0xfc8: 0x0ce1, 0xfc9: 0x0ce9, 0xfca: 0x0cf1, 0xfcb: 0x0cf9,
+ 0xfcc: 0x0d01, 0xfcd: 0x3cd5, 0xfce: 0x0d09, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d,
+ 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d,
+ 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05,
+ 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95,
+ 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd,
+ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55,
+ 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5,
+ 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015,
+ 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0d11,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x10f9, 0x1001: 0x1101, 0x1002: 0x40a5, 0x1003: 0x1109, 0x1004: 0x1111, 0x1005: 0x1119,
+ 0x1006: 0x1121, 0x1007: 0x1129, 0x1008: 0x40c5, 0x1009: 0x1131, 0x100a: 0x1139, 0x100b: 0x1141,
+ 0x100c: 0x40e5, 0x100d: 0x40e5, 0x100e: 0x1149, 0x100f: 0x1151, 0x1010: 0x1159, 0x1011: 0x4105,
+ 0x1012: 0x4125, 0x1013: 0x4145, 0x1014: 0x4165, 0x1015: 0x4185, 0x1016: 0x1161, 0x1017: 0x1169,
+ 0x1018: 0x1171, 0x1019: 0x1179, 0x101a: 0x1181, 0x101b: 0x41a5, 0x101c: 0x1189, 0x101d: 0x1191,
+ 0x101e: 0x1199, 0x101f: 0x41c5, 0x1020: 0x41e5, 0x1021: 0x11a1, 0x1022: 0x4205, 0x1023: 0x4225,
+ 0x1024: 0x4245, 0x1025: 0x11a9, 0x1026: 0x4265, 0x1027: 0x11b1, 0x1028: 0x11b9, 0x1029: 0x10f9,
+ 0x102a: 0x4285, 0x102b: 0x42a5, 0x102c: 0x42c5, 0x102d: 0x42e5, 0x102e: 0x11c1, 0x102f: 0x11c9,
+ 0x1030: 0x11d1, 0x1031: 0x11d9, 0x1032: 0x4305, 0x1033: 0x11e1, 0x1034: 0x11e9, 0x1035: 0x11f1,
+ 0x1036: 0x4325, 0x1037: 0x11f9, 0x1038: 0x1201, 0x1039: 0x11f9, 0x103a: 0x1209, 0x103b: 0x1211,
+ 0x103c: 0x4345, 0x103d: 0x1219, 0x103e: 0x1221, 0x103f: 0x1219,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x4365, 0x1041: 0x4385, 0x1042: 0x0040, 0x1043: 0x1229, 0x1044: 0x1231, 0x1045: 0x1239,
+ 0x1046: 0x1241, 0x1047: 0x0040, 0x1048: 0x1249, 0x1049: 0x1251, 0x104a: 0x1259, 0x104b: 0x1261,
+ 0x104c: 0x1269, 0x104d: 0x1271, 0x104e: 0x1199, 0x104f: 0x1279, 0x1050: 0x1281, 0x1051: 0x1289,
+ 0x1052: 0x43a5, 0x1053: 0x1291, 0x1054: 0x1121, 0x1055: 0x43c5, 0x1056: 0x43e5, 0x1057: 0x1299,
+ 0x1058: 0x0040, 0x1059: 0x4405, 0x105a: 0x12a1, 0x105b: 0x12a9, 0x105c: 0x12b1, 0x105d: 0x12b9,
+ 0x105e: 0x12c1, 0x105f: 0x12c9, 0x1060: 0x12d1, 0x1061: 0x12d9, 0x1062: 0x12e1, 0x1063: 0x12e9,
+ 0x1064: 0x12f1, 0x1065: 0x12f9, 0x1066: 0x1301, 0x1067: 0x1309, 0x1068: 0x1311, 0x1069: 0x1319,
+ 0x106a: 0x1321, 0x106b: 0x1329, 0x106c: 0x1331, 0x106d: 0x1339, 0x106e: 0x1341, 0x106f: 0x1349,
+ 0x1070: 0x1351, 0x1071: 0x1359, 0x1072: 0x1361, 0x1073: 0x1369, 0x1074: 0x1371, 0x1075: 0x1379,
+ 0x1076: 0x1381, 0x1077: 0x1389, 0x1078: 0x1391, 0x1079: 0x1399, 0x107a: 0x13a1, 0x107b: 0x13a9,
+ 0x107c: 0x13b1, 0x107d: 0x13b9, 0x107e: 0x13c1, 0x107f: 0x4425,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008,
+ 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008,
+ 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008,
+ 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008,
+ 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008,
+ 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008,
+ 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008,
+ 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x3308,
+ 0x10b0: 0x3318, 0x10b1: 0x3318, 0x10b2: 0x3318, 0x10b3: 0x0018, 0x10b4: 0x3308, 0x10b5: 0x3308,
+ 0x10b6: 0x3308, 0x10b7: 0x3308, 0x10b8: 0x3308, 0x10b9: 0x3308, 0x10ba: 0x3308, 0x10bb: 0x3308,
+ 0x10bc: 0x3308, 0x10bd: 0x3308, 0x10be: 0x0018, 0x10bf: 0x0008,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008,
+ 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008,
+ 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008,
+ 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008,
+ 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x02d1, 0x10dd: 0x13c9,
+ 0x10de: 0x3308, 0x10df: 0x3308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008,
+ 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008,
+ 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008,
+ 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008,
+ 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008,
+ 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018,
+ 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018,
+ 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018,
+ 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008,
+ 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008,
+ 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008,
+ 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008,
+ 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008,
+ 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008,
+ 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008,
+ 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008,
+ 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008,
+ 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d,
+ 0x117c: 0x0008, 0x117d: 0x4445, 0x117e: 0xe00d, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008,
+ 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d,
+ 0x118c: 0x0008, 0x118d: 0x0409, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008,
+ 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008,
+ 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008,
+ 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0x13d1, 0x11ab: 0x0371, 0x11ac: 0x0401, 0x11ad: 0x13d9, 0x11ae: 0x0421, 0x11af: 0x0008,
+ 0x11b0: 0x13e1, 0x11b1: 0x13e9, 0x11b2: 0x0429, 0x11b3: 0x4465, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0x650d, 0x11c1: 0x652d, 0x11c2: 0x654d, 0x11c3: 0x656d, 0x11c4: 0x658d, 0x11c5: 0x65ad,
+ 0x11c6: 0x65cd, 0x11c7: 0x65ed, 0x11c8: 0x660d, 0x11c9: 0x662d, 0x11ca: 0x664d, 0x11cb: 0x666d,
+ 0x11cc: 0x668d, 0x11cd: 0x66ad, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x66cd, 0x11d1: 0x0008,
+ 0x11d2: 0x66ed, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x670d, 0x11d6: 0x672d, 0x11d7: 0x674d,
+ 0x11d8: 0x676d, 0x11d9: 0x678d, 0x11da: 0x67ad, 0x11db: 0x67cd, 0x11dc: 0x67ed, 0x11dd: 0x680d,
+ 0x11de: 0x682d, 0x11df: 0x0008, 0x11e0: 0x684d, 0x11e1: 0x0008, 0x11e2: 0x686d, 0x11e3: 0x0008,
+ 0x11e4: 0x0008, 0x11e5: 0x688d, 0x11e6: 0x68ad, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008,
+ 0x11ea: 0x68cd, 0x11eb: 0x68ed, 0x11ec: 0x690d, 0x11ed: 0x692d, 0x11ee: 0x694d, 0x11ef: 0x696d,
+ 0x11f0: 0x698d, 0x11f1: 0x69ad, 0x11f2: 0x69cd, 0x11f3: 0x69ed, 0x11f4: 0x6a0d, 0x11f5: 0x6a2d,
+ 0x11f6: 0x6a4d, 0x11f7: 0x6a6d, 0x11f8: 0x6a8d, 0x11f9: 0x6aad, 0x11fa: 0x6acd, 0x11fb: 0x6aed,
+ 0x11fc: 0x6b0d, 0x11fd: 0x6b2d, 0x11fe: 0x6b4d, 0x11ff: 0x6b6d,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x7acd, 0x1201: 0x7aed, 0x1202: 0x7b0d, 0x1203: 0x7b2d, 0x1204: 0x7b4d, 0x1205: 0x7b6d,
+ 0x1206: 0x7b8d, 0x1207: 0x7bad, 0x1208: 0x7bcd, 0x1209: 0x7bed, 0x120a: 0x7c0d, 0x120b: 0x7c2d,
+ 0x120c: 0x7c4d, 0x120d: 0x7c6d, 0x120e: 0x7c8d, 0x120f: 0x1409, 0x1210: 0x1411, 0x1211: 0x1419,
+ 0x1212: 0x7cad, 0x1213: 0x7ccd, 0x1214: 0x7ced, 0x1215: 0x1421, 0x1216: 0x1429, 0x1217: 0x1431,
+ 0x1218: 0x7d0d, 0x1219: 0x7d2d, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040,
+ 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040,
+ 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040,
+ 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040,
+ 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040,
+ 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x1439, 0x1241: 0x1441, 0x1242: 0x1449, 0x1243: 0x7d4d, 0x1244: 0x7d6d, 0x1245: 0x1451,
+ 0x1246: 0x1451, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040,
+ 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040,
+ 0x1252: 0x0040, 0x1253: 0x1459, 0x1254: 0x1461, 0x1255: 0x1469, 0x1256: 0x1471, 0x1257: 0x1479,
+ 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x1481,
+ 0x125e: 0x3308, 0x125f: 0x1489, 0x1260: 0x1491, 0x1261: 0x0779, 0x1262: 0x0791, 0x1263: 0x1499,
+ 0x1264: 0x14a1, 0x1265: 0x14a9, 0x1266: 0x14b1, 0x1267: 0x14b9, 0x1268: 0x14c1, 0x1269: 0x071a,
+ 0x126a: 0x14c9, 0x126b: 0x14d1, 0x126c: 0x14d9, 0x126d: 0x14e1, 0x126e: 0x14e9, 0x126f: 0x14f1,
+ 0x1270: 0x14f9, 0x1271: 0x1501, 0x1272: 0x1509, 0x1273: 0x1511, 0x1274: 0x1519, 0x1275: 0x1521,
+ 0x1276: 0x1529, 0x1277: 0x0040, 0x1278: 0x1531, 0x1279: 0x1539, 0x127a: 0x1541, 0x127b: 0x1549,
+ 0x127c: 0x1551, 0x127d: 0x0040, 0x127e: 0x1559, 0x127f: 0x0040,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x1561, 0x1281: 0x1569, 0x1282: 0x0040, 0x1283: 0x1571, 0x1284: 0x1579, 0x1285: 0x0040,
+ 0x1286: 0x1581, 0x1287: 0x1589, 0x1288: 0x1591, 0x1289: 0x1599, 0x128a: 0x15a1, 0x128b: 0x15a9,
+ 0x128c: 0x15b1, 0x128d: 0x15b9, 0x128e: 0x15c1, 0x128f: 0x15c9, 0x1290: 0x15d1, 0x1291: 0x15d1,
+ 0x1292: 0x15d9, 0x1293: 0x15d9, 0x1294: 0x15d9, 0x1295: 0x15d9, 0x1296: 0x15e1, 0x1297: 0x15e1,
+ 0x1298: 0x15e1, 0x1299: 0x15e1, 0x129a: 0x15e9, 0x129b: 0x15e9, 0x129c: 0x15e9, 0x129d: 0x15e9,
+ 0x129e: 0x15f1, 0x129f: 0x15f1, 0x12a0: 0x15f1, 0x12a1: 0x15f1, 0x12a2: 0x15f9, 0x12a3: 0x15f9,
+ 0x12a4: 0x15f9, 0x12a5: 0x15f9, 0x12a6: 0x1601, 0x12a7: 0x1601, 0x12a8: 0x1601, 0x12a9: 0x1601,
+ 0x12aa: 0x1609, 0x12ab: 0x1609, 0x12ac: 0x1609, 0x12ad: 0x1609, 0x12ae: 0x1611, 0x12af: 0x1611,
+ 0x12b0: 0x1611, 0x12b1: 0x1611, 0x12b2: 0x1619, 0x12b3: 0x1619, 0x12b4: 0x1619, 0x12b5: 0x1619,
+ 0x12b6: 0x1621, 0x12b7: 0x1621, 0x12b8: 0x1621, 0x12b9: 0x1621, 0x12ba: 0x1629, 0x12bb: 0x1629,
+ 0x12bc: 0x1629, 0x12bd: 0x1629, 0x12be: 0x1631, 0x12bf: 0x1631,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x1631, 0x12c1: 0x1631, 0x12c2: 0x1639, 0x12c3: 0x1639, 0x12c4: 0x1641, 0x12c5: 0x1641,
+ 0x12c6: 0x1649, 0x12c7: 0x1649, 0x12c8: 0x1651, 0x12c9: 0x1651, 0x12ca: 0x1659, 0x12cb: 0x1659,
+ 0x12cc: 0x1661, 0x12cd: 0x1661, 0x12ce: 0x1669, 0x12cf: 0x1669, 0x12d0: 0x1669, 0x12d1: 0x1669,
+ 0x12d2: 0x1671, 0x12d3: 0x1671, 0x12d4: 0x1671, 0x12d5: 0x1671, 0x12d6: 0x1679, 0x12d7: 0x1679,
+ 0x12d8: 0x1679, 0x12d9: 0x1679, 0x12da: 0x1681, 0x12db: 0x1681, 0x12dc: 0x1681, 0x12dd: 0x1681,
+ 0x12de: 0x1689, 0x12df: 0x1689, 0x12e0: 0x1691, 0x12e1: 0x1691, 0x12e2: 0x1691, 0x12e3: 0x1691,
+ 0x12e4: 0x1699, 0x12e5: 0x1699, 0x12e6: 0x16a1, 0x12e7: 0x16a1, 0x12e8: 0x16a1, 0x12e9: 0x16a1,
+ 0x12ea: 0x16a9, 0x12eb: 0x16a9, 0x12ec: 0x16a9, 0x12ed: 0x16a9, 0x12ee: 0x16b1, 0x12ef: 0x16b1,
+ 0x12f0: 0x16b9, 0x12f1: 0x16b9, 0x12f2: 0x0818, 0x12f3: 0x0818, 0x12f4: 0x0818, 0x12f5: 0x0818,
+ 0x12f6: 0x0818, 0x12f7: 0x0818, 0x12f8: 0x0818, 0x12f9: 0x0818, 0x12fa: 0x0818, 0x12fb: 0x0818,
+ 0x12fc: 0x0818, 0x12fd: 0x0818, 0x12fe: 0x0818, 0x12ff: 0x0818,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x0818, 0x1301: 0x0818, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040,
+ 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040,
+ 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040,
+ 0x1312: 0x0040, 0x1313: 0x16c1, 0x1314: 0x16c1, 0x1315: 0x16c1, 0x1316: 0x16c1, 0x1317: 0x16c9,
+ 0x1318: 0x16c9, 0x1319: 0x16d1, 0x131a: 0x16d1, 0x131b: 0x16d9, 0x131c: 0x16d9, 0x131d: 0x0149,
+ 0x131e: 0x16e1, 0x131f: 0x16e1, 0x1320: 0x16e9, 0x1321: 0x16e9, 0x1322: 0x16f1, 0x1323: 0x16f1,
+ 0x1324: 0x16f9, 0x1325: 0x16f9, 0x1326: 0x16f9, 0x1327: 0x16f9, 0x1328: 0x1701, 0x1329: 0x1701,
+ 0x132a: 0x1709, 0x132b: 0x1709, 0x132c: 0x1711, 0x132d: 0x1711, 0x132e: 0x1719, 0x132f: 0x1719,
+ 0x1330: 0x1721, 0x1331: 0x1721, 0x1332: 0x1729, 0x1333: 0x1729, 0x1334: 0x1731, 0x1335: 0x1731,
+ 0x1336: 0x1739, 0x1337: 0x1739, 0x1338: 0x1739, 0x1339: 0x1741, 0x133a: 0x1741, 0x133b: 0x1741,
+ 0x133c: 0x1749, 0x133d: 0x1749, 0x133e: 0x1749, 0x133f: 0x1749,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x1949, 0x1341: 0x1951, 0x1342: 0x1959, 0x1343: 0x1961, 0x1344: 0x1969, 0x1345: 0x1971,
+ 0x1346: 0x1979, 0x1347: 0x1981, 0x1348: 0x1989, 0x1349: 0x1991, 0x134a: 0x1999, 0x134b: 0x19a1,
+ 0x134c: 0x19a9, 0x134d: 0x19b1, 0x134e: 0x19b9, 0x134f: 0x19c1, 0x1350: 0x19c9, 0x1351: 0x19d1,
+ 0x1352: 0x19d9, 0x1353: 0x19e1, 0x1354: 0x19e9, 0x1355: 0x19f1, 0x1356: 0x19f9, 0x1357: 0x1a01,
+ 0x1358: 0x1a09, 0x1359: 0x1a11, 0x135a: 0x1a19, 0x135b: 0x1a21, 0x135c: 0x1a29, 0x135d: 0x1a31,
+ 0x135e: 0x1a3a, 0x135f: 0x1a42, 0x1360: 0x1a4a, 0x1361: 0x1a52, 0x1362: 0x1a5a, 0x1363: 0x1a62,
+ 0x1364: 0x1a69, 0x1365: 0x1a71, 0x1366: 0x1761, 0x1367: 0x1a79, 0x1368: 0x1741, 0x1369: 0x1769,
+ 0x136a: 0x1a81, 0x136b: 0x1a89, 0x136c: 0x1789, 0x136d: 0x1a91, 0x136e: 0x1791, 0x136f: 0x1799,
+ 0x1370: 0x1a99, 0x1371: 0x1aa1, 0x1372: 0x17b9, 0x1373: 0x1aa9, 0x1374: 0x17c1, 0x1375: 0x17c9,
+ 0x1376: 0x1ab1, 0x1377: 0x1ab9, 0x1378: 0x17d9, 0x1379: 0x1ac1, 0x137a: 0x17e1, 0x137b: 0x17e9,
+ 0x137c: 0x18d1, 0x137d: 0x18d9, 0x137e: 0x18f1, 0x137f: 0x18f9,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x1901, 0x1381: 0x1921, 0x1382: 0x1929, 0x1383: 0x1931, 0x1384: 0x1939, 0x1385: 0x1959,
+ 0x1386: 0x1961, 0x1387: 0x1969, 0x1388: 0x1ac9, 0x1389: 0x1989, 0x138a: 0x1ad1, 0x138b: 0x1ad9,
+ 0x138c: 0x19b9, 0x138d: 0x1ae1, 0x138e: 0x19c1, 0x138f: 0x19c9, 0x1390: 0x1a31, 0x1391: 0x1ae9,
+ 0x1392: 0x1af1, 0x1393: 0x1a09, 0x1394: 0x1af9, 0x1395: 0x1a11, 0x1396: 0x1a19, 0x1397: 0x1751,
+ 0x1398: 0x1759, 0x1399: 0x1b01, 0x139a: 0x1761, 0x139b: 0x1b09, 0x139c: 0x1771, 0x139d: 0x1779,
+ 0x139e: 0x1781, 0x139f: 0x1789, 0x13a0: 0x1b11, 0x13a1: 0x17a1, 0x13a2: 0x17a9, 0x13a3: 0x17b1,
+ 0x13a4: 0x17b9, 0x13a5: 0x1b19, 0x13a6: 0x17d9, 0x13a7: 0x17f1, 0x13a8: 0x17f9, 0x13a9: 0x1801,
+ 0x13aa: 0x1809, 0x13ab: 0x1811, 0x13ac: 0x1821, 0x13ad: 0x1829, 0x13ae: 0x1831, 0x13af: 0x1839,
+ 0x13b0: 0x1841, 0x13b1: 0x1849, 0x13b2: 0x1b21, 0x13b3: 0x1851, 0x13b4: 0x1859, 0x13b5: 0x1861,
+ 0x13b6: 0x1869, 0x13b7: 0x1871, 0x13b8: 0x1879, 0x13b9: 0x1889, 0x13ba: 0x1891, 0x13bb: 0x1899,
+ 0x13bc: 0x18a1, 0x13bd: 0x18a9, 0x13be: 0x18b1, 0x13bf: 0x18b9,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x18c1, 0x13c1: 0x18c9, 0x13c2: 0x18e1, 0x13c3: 0x18e9, 0x13c4: 0x1909, 0x13c5: 0x1911,
+ 0x13c6: 0x1919, 0x13c7: 0x1921, 0x13c8: 0x1929, 0x13c9: 0x1941, 0x13ca: 0x1949, 0x13cb: 0x1951,
+ 0x13cc: 0x1959, 0x13cd: 0x1b29, 0x13ce: 0x1971, 0x13cf: 0x1979, 0x13d0: 0x1981, 0x13d1: 0x1989,
+ 0x13d2: 0x19a1, 0x13d3: 0x19a9, 0x13d4: 0x19b1, 0x13d5: 0x19b9, 0x13d6: 0x1b31, 0x13d7: 0x19d1,
+ 0x13d8: 0x19d9, 0x13d9: 0x1b39, 0x13da: 0x19f1, 0x13db: 0x19f9, 0x13dc: 0x1a01, 0x13dd: 0x1a09,
+ 0x13de: 0x1b41, 0x13df: 0x1761, 0x13e0: 0x1b09, 0x13e1: 0x1789, 0x13e2: 0x1b11, 0x13e3: 0x17b9,
+ 0x13e4: 0x1b19, 0x13e5: 0x17d9, 0x13e6: 0x1b49, 0x13e7: 0x1841, 0x13e8: 0x1b51, 0x13e9: 0x1b59,
+ 0x13ea: 0x1b61, 0x13eb: 0x1921, 0x13ec: 0x1929, 0x13ed: 0x1959, 0x13ee: 0x19b9, 0x13ef: 0x1b31,
+ 0x13f0: 0x1a09, 0x13f1: 0x1b41, 0x13f2: 0x1b69, 0x13f3: 0x1b71, 0x13f4: 0x1b79, 0x13f5: 0x1b81,
+ 0x13f6: 0x1b89, 0x13f7: 0x1b91, 0x13f8: 0x1b99, 0x13f9: 0x1ba1, 0x13fa: 0x1ba9, 0x13fb: 0x1bb1,
+ 0x13fc: 0x1bb9, 0x13fd: 0x1bc1, 0x13fe: 0x1bc9, 0x13ff: 0x1bd1,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x1bd9, 0x1401: 0x1be1, 0x1402: 0x1be9, 0x1403: 0x1bf1, 0x1404: 0x1bf9, 0x1405: 0x1c01,
+ 0x1406: 0x1c09, 0x1407: 0x1c11, 0x1408: 0x1c19, 0x1409: 0x1c21, 0x140a: 0x1c29, 0x140b: 0x1c31,
+ 0x140c: 0x1b59, 0x140d: 0x1c39, 0x140e: 0x1c41, 0x140f: 0x1c49, 0x1410: 0x1c51, 0x1411: 0x1b81,
+ 0x1412: 0x1b89, 0x1413: 0x1b91, 0x1414: 0x1b99, 0x1415: 0x1ba1, 0x1416: 0x1ba9, 0x1417: 0x1bb1,
+ 0x1418: 0x1bb9, 0x1419: 0x1bc1, 0x141a: 0x1bc9, 0x141b: 0x1bd1, 0x141c: 0x1bd9, 0x141d: 0x1be1,
+ 0x141e: 0x1be9, 0x141f: 0x1bf1, 0x1420: 0x1bf9, 0x1421: 0x1c01, 0x1422: 0x1c09, 0x1423: 0x1c11,
+ 0x1424: 0x1c19, 0x1425: 0x1c21, 0x1426: 0x1c29, 0x1427: 0x1c31, 0x1428: 0x1b59, 0x1429: 0x1c39,
+ 0x142a: 0x1c41, 0x142b: 0x1c49, 0x142c: 0x1c51, 0x142d: 0x1c21, 0x142e: 0x1c29, 0x142f: 0x1c31,
+ 0x1430: 0x1b59, 0x1431: 0x1b51, 0x1432: 0x1b61, 0x1433: 0x1881, 0x1434: 0x1829, 0x1435: 0x1831,
+ 0x1436: 0x1839, 0x1437: 0x1c21, 0x1438: 0x1c29, 0x1439: 0x1c31, 0x143a: 0x1881, 0x143b: 0x1889,
+ 0x143c: 0x1c59, 0x143d: 0x1c59, 0x143e: 0x0018, 0x143f: 0x0018,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040,
+ 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040,
+ 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x1c61, 0x1451: 0x1c69,
+ 0x1452: 0x1c69, 0x1453: 0x1c71, 0x1454: 0x1c79, 0x1455: 0x1c81, 0x1456: 0x1c89, 0x1457: 0x1c91,
+ 0x1458: 0x1c99, 0x1459: 0x1c99, 0x145a: 0x1ca1, 0x145b: 0x1ca9, 0x145c: 0x1cb1, 0x145d: 0x1cb9,
+ 0x145e: 0x1cc1, 0x145f: 0x1cc9, 0x1460: 0x1cc9, 0x1461: 0x1cd1, 0x1462: 0x1cd9, 0x1463: 0x1cd9,
+ 0x1464: 0x1ce1, 0x1465: 0x1ce1, 0x1466: 0x1ce9, 0x1467: 0x1cf1, 0x1468: 0x1cf1, 0x1469: 0x1cf9,
+ 0x146a: 0x1d01, 0x146b: 0x1d01, 0x146c: 0x1d09, 0x146d: 0x1d09, 0x146e: 0x1d11, 0x146f: 0x1d19,
+ 0x1470: 0x1d19, 0x1471: 0x1d21, 0x1472: 0x1d21, 0x1473: 0x1d29, 0x1474: 0x1d31, 0x1475: 0x1d39,
+ 0x1476: 0x1d41, 0x1477: 0x1d41, 0x1478: 0x1d49, 0x1479: 0x1d51, 0x147a: 0x1d59, 0x147b: 0x1d61,
+ 0x147c: 0x1d69, 0x147d: 0x1d69, 0x147e: 0x1d71, 0x147f: 0x1d79,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x1f29, 0x1481: 0x1f31, 0x1482: 0x1f39, 0x1483: 0x1f11, 0x1484: 0x1d39, 0x1485: 0x1ce9,
+ 0x1486: 0x1f41, 0x1487: 0x1f49, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040,
+ 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040,
+ 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040,
+ 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040,
+ 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040,
+ 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040,
+ 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040,
+ 0x14b0: 0x1f51, 0x14b1: 0x1f59, 0x14b2: 0x1f61, 0x14b3: 0x1f69, 0x14b4: 0x1f71, 0x14b5: 0x1f79,
+ 0x14b6: 0x1f81, 0x14b7: 0x1f89, 0x14b8: 0x1f91, 0x14b9: 0x1f99, 0x14ba: 0x1fa2, 0x14bb: 0x1faa,
+ 0x14bc: 0x1fb1, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x33c0, 0x14c1: 0x33c0, 0x14c2: 0x33c0, 0x14c3: 0x33c0, 0x14c4: 0x33c0, 0x14c5: 0x33c0,
+ 0x14c6: 0x33c0, 0x14c7: 0x33c0, 0x14c8: 0x33c0, 0x14c9: 0x33c0, 0x14ca: 0x33c0, 0x14cb: 0x33c0,
+ 0x14cc: 0x33c0, 0x14cd: 0x33c0, 0x14ce: 0x33c0, 0x14cf: 0x33c0, 0x14d0: 0x1fba, 0x14d1: 0x7d8d,
+ 0x14d2: 0x0040, 0x14d3: 0x1fc2, 0x14d4: 0x0122, 0x14d5: 0x1fca, 0x14d6: 0x1fd2, 0x14d7: 0x7dad,
+ 0x14d8: 0x7dcd, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040,
+ 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x3308, 0x14e1: 0x3308, 0x14e2: 0x3308, 0x14e3: 0x3308,
+ 0x14e4: 0x3308, 0x14e5: 0x3308, 0x14e6: 0x3308, 0x14e7: 0x3308, 0x14e8: 0x3308, 0x14e9: 0x3308,
+ 0x14ea: 0x3308, 0x14eb: 0x3308, 0x14ec: 0x3308, 0x14ed: 0x3308, 0x14ee: 0x3308, 0x14ef: 0x3308,
+ 0x14f0: 0x0040, 0x14f1: 0x7ded, 0x14f2: 0x7e0d, 0x14f3: 0x1fda, 0x14f4: 0x1fda, 0x14f5: 0x072a,
+ 0x14f6: 0x0732, 0x14f7: 0x1fe2, 0x14f8: 0x1fea, 0x14f9: 0x7e2d, 0x14fa: 0x7e4d, 0x14fb: 0x7e6d,
+ 0x14fc: 0x7e2d, 0x14fd: 0x7e8d, 0x14fe: 0x7ead, 0x14ff: 0x7e8d,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x7ecd, 0x1501: 0x7eed, 0x1502: 0x7f0d, 0x1503: 0x7eed, 0x1504: 0x7f2d, 0x1505: 0x0018,
+ 0x1506: 0x0018, 0x1507: 0x1ff2, 0x1508: 0x1ffa, 0x1509: 0x7f4e, 0x150a: 0x7f6e, 0x150b: 0x7f8e,
+ 0x150c: 0x7fae, 0x150d: 0x1fda, 0x150e: 0x1fda, 0x150f: 0x1fda, 0x1510: 0x1fba, 0x1511: 0x7fcd,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0122, 0x1515: 0x1fc2, 0x1516: 0x1fd2, 0x1517: 0x1fca,
+ 0x1518: 0x7fed, 0x1519: 0x072a, 0x151a: 0x0732, 0x151b: 0x1fe2, 0x151c: 0x1fea, 0x151d: 0x7ecd,
+ 0x151e: 0x7f2d, 0x151f: 0x2002, 0x1520: 0x200a, 0x1521: 0x2012, 0x1522: 0x071a, 0x1523: 0x2019,
+ 0x1524: 0x2022, 0x1525: 0x202a, 0x1526: 0x0722, 0x1527: 0x0040, 0x1528: 0x2032, 0x1529: 0x203a,
+ 0x152a: 0x2042, 0x152b: 0x204a, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0x800e, 0x1531: 0x2051, 0x1532: 0x802e, 0x1533: 0x0808, 0x1534: 0x804e, 0x1535: 0x0040,
+ 0x1536: 0x806e, 0x1537: 0x2059, 0x1538: 0x808e, 0x1539: 0x2061, 0x153a: 0x80ae, 0x153b: 0x2069,
+ 0x153c: 0x80ce, 0x153d: 0x2071, 0x153e: 0x80ee, 0x153f: 0x2079,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x2081, 0x1541: 0x2089, 0x1542: 0x2089, 0x1543: 0x2091, 0x1544: 0x2091, 0x1545: 0x2099,
+ 0x1546: 0x2099, 0x1547: 0x20a1, 0x1548: 0x20a1, 0x1549: 0x20a9, 0x154a: 0x20a9, 0x154b: 0x20a9,
+ 0x154c: 0x20a9, 0x154d: 0x20b1, 0x154e: 0x20b1, 0x154f: 0x20b9, 0x1550: 0x20b9, 0x1551: 0x20b9,
+ 0x1552: 0x20b9, 0x1553: 0x20c1, 0x1554: 0x20c1, 0x1555: 0x20c9, 0x1556: 0x20c9, 0x1557: 0x20c9,
+ 0x1558: 0x20c9, 0x1559: 0x20d1, 0x155a: 0x20d1, 0x155b: 0x20d1, 0x155c: 0x20d1, 0x155d: 0x20d9,
+ 0x155e: 0x20d9, 0x155f: 0x20d9, 0x1560: 0x20d9, 0x1561: 0x20e1, 0x1562: 0x20e1, 0x1563: 0x20e1,
+ 0x1564: 0x20e1, 0x1565: 0x20e9, 0x1566: 0x20e9, 0x1567: 0x20e9, 0x1568: 0x20e9, 0x1569: 0x20f1,
+ 0x156a: 0x20f1, 0x156b: 0x20f9, 0x156c: 0x20f9, 0x156d: 0x2101, 0x156e: 0x2101, 0x156f: 0x2109,
+ 0x1570: 0x2109, 0x1571: 0x2111, 0x1572: 0x2111, 0x1573: 0x2111, 0x1574: 0x2111, 0x1575: 0x2119,
+ 0x1576: 0x2119, 0x1577: 0x2119, 0x1578: 0x2119, 0x1579: 0x2121, 0x157a: 0x2121, 0x157b: 0x2121,
+ 0x157c: 0x2121, 0x157d: 0x2129, 0x157e: 0x2129, 0x157f: 0x2129,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x2129, 0x1581: 0x2131, 0x1582: 0x2131, 0x1583: 0x2131, 0x1584: 0x2131, 0x1585: 0x2139,
+ 0x1586: 0x2139, 0x1587: 0x2139, 0x1588: 0x2139, 0x1589: 0x2141, 0x158a: 0x2141, 0x158b: 0x2141,
+ 0x158c: 0x2141, 0x158d: 0x2149, 0x158e: 0x2149, 0x158f: 0x2149, 0x1590: 0x2149, 0x1591: 0x2151,
+ 0x1592: 0x2151, 0x1593: 0x2151, 0x1594: 0x2151, 0x1595: 0x2159, 0x1596: 0x2159, 0x1597: 0x2159,
+ 0x1598: 0x2159, 0x1599: 0x2161, 0x159a: 0x2161, 0x159b: 0x2161, 0x159c: 0x2161, 0x159d: 0x2169,
+ 0x159e: 0x2169, 0x159f: 0x2169, 0x15a0: 0x2169, 0x15a1: 0x2171, 0x15a2: 0x2171, 0x15a3: 0x2171,
+ 0x15a4: 0x2171, 0x15a5: 0x2179, 0x15a6: 0x2179, 0x15a7: 0x2179, 0x15a8: 0x2179, 0x15a9: 0x2181,
+ 0x15aa: 0x2181, 0x15ab: 0x2181, 0x15ac: 0x2181, 0x15ad: 0x2189, 0x15ae: 0x2189, 0x15af: 0x1701,
+ 0x15b0: 0x1701, 0x15b1: 0x2191, 0x15b2: 0x2191, 0x15b3: 0x2191, 0x15b4: 0x2191, 0x15b5: 0x2199,
+ 0x15b6: 0x2199, 0x15b7: 0x21a1, 0x15b8: 0x21a1, 0x15b9: 0x21a9, 0x15ba: 0x21a9, 0x15bb: 0x21b1,
+ 0x15bc: 0x21b1, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x0040, 0x15c1: 0x1fca, 0x15c2: 0x21ba, 0x15c3: 0x2002, 0x15c4: 0x203a, 0x15c5: 0x2042,
+ 0x15c6: 0x200a, 0x15c7: 0x21c2, 0x15c8: 0x072a, 0x15c9: 0x0732, 0x15ca: 0x2012, 0x15cb: 0x071a,
+ 0x15cc: 0x1fba, 0x15cd: 0x2019, 0x15ce: 0x0961, 0x15cf: 0x21ca, 0x15d0: 0x06e1, 0x15d1: 0x0049,
+ 0x15d2: 0x0029, 0x15d3: 0x0031, 0x15d4: 0x06e9, 0x15d5: 0x06f1, 0x15d6: 0x06f9, 0x15d7: 0x0701,
+ 0x15d8: 0x0709, 0x15d9: 0x0711, 0x15da: 0x1fc2, 0x15db: 0x0122, 0x15dc: 0x2022, 0x15dd: 0x0722,
+ 0x15de: 0x202a, 0x15df: 0x1fd2, 0x15e0: 0x204a, 0x15e1: 0x0019, 0x15e2: 0x02e9, 0x15e3: 0x03d9,
+ 0x15e4: 0x02f1, 0x15e5: 0x02f9, 0x15e6: 0x03f1, 0x15e7: 0x0309, 0x15e8: 0x00a9, 0x15e9: 0x0311,
+ 0x15ea: 0x00b1, 0x15eb: 0x0319, 0x15ec: 0x0101, 0x15ed: 0x0321, 0x15ee: 0x0329, 0x15ef: 0x0051,
+ 0x15f0: 0x0339, 0x15f1: 0x0751, 0x15f2: 0x00b9, 0x15f3: 0x0089, 0x15f4: 0x0341, 0x15f5: 0x0349,
+ 0x15f6: 0x0391, 0x15f7: 0x00c1, 0x15f8: 0x0109, 0x15f9: 0x00c9, 0x15fa: 0x04b1, 0x15fb: 0x1ff2,
+ 0x15fc: 0x2032, 0x15fd: 0x1ffa, 0x15fe: 0x21d2, 0x15ff: 0x1fda,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0672, 0x1601: 0x0019, 0x1602: 0x02e9, 0x1603: 0x03d9, 0x1604: 0x02f1, 0x1605: 0x02f9,
+ 0x1606: 0x03f1, 0x1607: 0x0309, 0x1608: 0x00a9, 0x1609: 0x0311, 0x160a: 0x00b1, 0x160b: 0x0319,
+ 0x160c: 0x0101, 0x160d: 0x0321, 0x160e: 0x0329, 0x160f: 0x0051, 0x1610: 0x0339, 0x1611: 0x0751,
+ 0x1612: 0x00b9, 0x1613: 0x0089, 0x1614: 0x0341, 0x1615: 0x0349, 0x1616: 0x0391, 0x1617: 0x00c1,
+ 0x1618: 0x0109, 0x1619: 0x00c9, 0x161a: 0x04b1, 0x161b: 0x1fe2, 0x161c: 0x21da, 0x161d: 0x1fea,
+ 0x161e: 0x21e2, 0x161f: 0x810d, 0x1620: 0x812d, 0x1621: 0x0961, 0x1622: 0x814d, 0x1623: 0x814d,
+ 0x1624: 0x816d, 0x1625: 0x818d, 0x1626: 0x81ad, 0x1627: 0x81cd, 0x1628: 0x81ed, 0x1629: 0x820d,
+ 0x162a: 0x822d, 0x162b: 0x824d, 0x162c: 0x826d, 0x162d: 0x828d, 0x162e: 0x82ad, 0x162f: 0x82cd,
+ 0x1630: 0x82ed, 0x1631: 0x830d, 0x1632: 0x832d, 0x1633: 0x834d, 0x1634: 0x836d, 0x1635: 0x838d,
+ 0x1636: 0x83ad, 0x1637: 0x83cd, 0x1638: 0x83ed, 0x1639: 0x840d, 0x163a: 0x842d, 0x163b: 0x844d,
+ 0x163c: 0x81ed, 0x163d: 0x846d, 0x163e: 0x848d, 0x163f: 0x824d,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x84ad, 0x1641: 0x84cd, 0x1642: 0x84ed, 0x1643: 0x850d, 0x1644: 0x852d, 0x1645: 0x854d,
+ 0x1646: 0x856d, 0x1647: 0x858d, 0x1648: 0x850d, 0x1649: 0x85ad, 0x164a: 0x850d, 0x164b: 0x85cd,
+ 0x164c: 0x85cd, 0x164d: 0x85ed, 0x164e: 0x85ed, 0x164f: 0x860d, 0x1650: 0x854d, 0x1651: 0x862d,
+ 0x1652: 0x864d, 0x1653: 0x862d, 0x1654: 0x866d, 0x1655: 0x864d, 0x1656: 0x868d, 0x1657: 0x868d,
+ 0x1658: 0x86ad, 0x1659: 0x86ad, 0x165a: 0x86cd, 0x165b: 0x86cd, 0x165c: 0x864d, 0x165d: 0x814d,
+ 0x165e: 0x86ed, 0x165f: 0x870d, 0x1660: 0x0040, 0x1661: 0x872d, 0x1662: 0x874d, 0x1663: 0x876d,
+ 0x1664: 0x878d, 0x1665: 0x876d, 0x1666: 0x87ad, 0x1667: 0x87cd, 0x1668: 0x87ed, 0x1669: 0x87ed,
+ 0x166a: 0x880d, 0x166b: 0x880d, 0x166c: 0x882d, 0x166d: 0x882d, 0x166e: 0x880d, 0x166f: 0x880d,
+ 0x1670: 0x884d, 0x1671: 0x886d, 0x1672: 0x888d, 0x1673: 0x88ad, 0x1674: 0x88cd, 0x1675: 0x88ed,
+ 0x1676: 0x88ed, 0x1677: 0x88ed, 0x1678: 0x890d, 0x1679: 0x890d, 0x167a: 0x890d, 0x167b: 0x890d,
+ 0x167c: 0x87ed, 0x167d: 0x87ed, 0x167e: 0x87ed, 0x167f: 0x0040,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x874d, 0x1683: 0x872d, 0x1684: 0x892d, 0x1685: 0x872d,
+ 0x1686: 0x874d, 0x1687: 0x872d, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x894d, 0x168b: 0x874d,
+ 0x168c: 0x896d, 0x168d: 0x892d, 0x168e: 0x896d, 0x168f: 0x874d, 0x1690: 0x0040, 0x1691: 0x0040,
+ 0x1692: 0x898d, 0x1693: 0x89ad, 0x1694: 0x88ad, 0x1695: 0x896d, 0x1696: 0x892d, 0x1697: 0x896d,
+ 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x89cd, 0x169b: 0x89ed, 0x169c: 0x89cd, 0x169d: 0x0040,
+ 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0x21e9, 0x16a1: 0x21f1, 0x16a2: 0x21f9, 0x16a3: 0x8a0e,
+ 0x16a4: 0x2201, 0x16a5: 0x2209, 0x16a6: 0x8a2d, 0x16a7: 0x0040, 0x16a8: 0x8a4d, 0x16a9: 0x8a6d,
+ 0x16aa: 0x8a8d, 0x16ab: 0x8a6d, 0x16ac: 0x8aad, 0x16ad: 0x8acd, 0x16ae: 0x8aed, 0x16af: 0x0040,
+ 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040,
+ 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340,
+ 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x0a08, 0x16c1: 0x0a08, 0x16c2: 0x0a08, 0x16c3: 0x0a08, 0x16c4: 0x0a08, 0x16c5: 0x0c08,
+ 0x16c6: 0x0808, 0x16c7: 0x0c08, 0x16c8: 0x0818, 0x16c9: 0x0c08, 0x16ca: 0x0c08, 0x16cb: 0x0808,
+ 0x16cc: 0x0808, 0x16cd: 0x0908, 0x16ce: 0x0c08, 0x16cf: 0x0c08, 0x16d0: 0x0c08, 0x16d1: 0x0c08,
+ 0x16d2: 0x0c08, 0x16d3: 0x0a08, 0x16d4: 0x0a08, 0x16d5: 0x0a08, 0x16d6: 0x0a08, 0x16d7: 0x0908,
+ 0x16d8: 0x0a08, 0x16d9: 0x0a08, 0x16da: 0x0a08, 0x16db: 0x0a08, 0x16dc: 0x0a08, 0x16dd: 0x0c08,
+ 0x16de: 0x0a08, 0x16df: 0x0a08, 0x16e0: 0x0a08, 0x16e1: 0x0c08, 0x16e2: 0x0808, 0x16e3: 0x0808,
+ 0x16e4: 0x0c08, 0x16e5: 0x3308, 0x16e6: 0x3308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040,
+ 0x16ea: 0x0040, 0x16eb: 0x0a18, 0x16ec: 0x0a18, 0x16ed: 0x0a18, 0x16ee: 0x0a18, 0x16ef: 0x0c18,
+ 0x16f0: 0x0818, 0x16f1: 0x0818, 0x16f2: 0x0818, 0x16f3: 0x0818, 0x16f4: 0x0818, 0x16f5: 0x0818,
+ 0x16f6: 0x0818, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040,
+ 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0a08, 0x1701: 0x0c08, 0x1702: 0x0a08, 0x1703: 0x0c08, 0x1704: 0x0c08, 0x1705: 0x0c08,
+ 0x1706: 0x0a08, 0x1707: 0x0a08, 0x1708: 0x0a08, 0x1709: 0x0c08, 0x170a: 0x0a08, 0x170b: 0x0a08,
+ 0x170c: 0x0c08, 0x170d: 0x0a08, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0a08, 0x1711: 0x0c08,
+ 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040,
+ 0x1718: 0x0040, 0x1719: 0x0818, 0x171a: 0x0818, 0x171b: 0x0818, 0x171c: 0x0818, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040,
+ 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0c18,
+ 0x172a: 0x0c18, 0x172b: 0x0c18, 0x172c: 0x0c18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0818,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x3308, 0x1741: 0x3308, 0x1742: 0x3008, 0x1743: 0x3008, 0x1744: 0x0040, 0x1745: 0x0008,
+ 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008,
+ 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040,
+ 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008,
+ 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008,
+ 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008,
+ 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040,
+ 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008,
+ 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008,
+ 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x3308,
+ 0x177c: 0x3308, 0x177d: 0x0008, 0x177e: 0x3008, 0x177f: 0x3008,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x3308, 0x1781: 0x3008, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x3008, 0x1785: 0x0040,
+ 0x1786: 0x0040, 0x1787: 0x3008, 0x1788: 0x3008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x3008,
+ 0x178c: 0x3008, 0x178d: 0x3808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x3008,
+ 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008,
+ 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x3008, 0x17a3: 0x3008,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x3308, 0x17a7: 0x3308, 0x17a8: 0x3308, 0x17a9: 0x3308,
+ 0x17aa: 0x3308, 0x17ab: 0x3308, 0x17ac: 0x3308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040,
+ 0x17b0: 0x3308, 0x17b1: 0x3308, 0x17b2: 0x3308, 0x17b3: 0x3308, 0x17b4: 0x3308, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x0008, 0x17c1: 0x0008, 0x17c2: 0x0008, 0x17c3: 0x0008, 0x17c4: 0x0008, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0040, 0x17c8: 0x0040, 0x17c9: 0x0008, 0x17ca: 0x0040, 0x17cb: 0x0040,
+ 0x17cc: 0x0008, 0x17cd: 0x0008, 0x17ce: 0x0008, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0008,
+ 0x17d2: 0x0008, 0x17d3: 0x0008, 0x17d4: 0x0040, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0040,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0008,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x3008, 0x17f1: 0x3008, 0x17f2: 0x3008, 0x17f3: 0x3008, 0x17f4: 0x3008, 0x17f5: 0x3008,
+ 0x17f6: 0x0040, 0x17f7: 0x3008, 0x17f8: 0x3008, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x3808, 0x17fe: 0x3b08, 0x17ff: 0x0008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x0019, 0x1801: 0x02e9, 0x1802: 0x03d9, 0x1803: 0x02f1, 0x1804: 0x02f9, 0x1805: 0x03f1,
+ 0x1806: 0x0309, 0x1807: 0x00a9, 0x1808: 0x0311, 0x1809: 0x00b1, 0x180a: 0x0319, 0x180b: 0x0101,
+ 0x180c: 0x0321, 0x180d: 0x0329, 0x180e: 0x0051, 0x180f: 0x0339, 0x1810: 0x0751, 0x1811: 0x00b9,
+ 0x1812: 0x0089, 0x1813: 0x0341, 0x1814: 0x0349, 0x1815: 0x0391, 0x1816: 0x00c1, 0x1817: 0x0109,
+ 0x1818: 0x00c9, 0x1819: 0x04b1, 0x181a: 0x0019, 0x181b: 0x02e9, 0x181c: 0x03d9, 0x181d: 0x02f1,
+ 0x181e: 0x02f9, 0x181f: 0x03f1, 0x1820: 0x0309, 0x1821: 0x00a9, 0x1822: 0x0311, 0x1823: 0x00b1,
+ 0x1824: 0x0319, 0x1825: 0x0101, 0x1826: 0x0321, 0x1827: 0x0329, 0x1828: 0x0051, 0x1829: 0x0339,
+ 0x182a: 0x0751, 0x182b: 0x00b9, 0x182c: 0x0089, 0x182d: 0x0341, 0x182e: 0x0349, 0x182f: 0x0391,
+ 0x1830: 0x00c1, 0x1831: 0x0109, 0x1832: 0x00c9, 0x1833: 0x04b1, 0x1834: 0x0019, 0x1835: 0x02e9,
+ 0x1836: 0x03d9, 0x1837: 0x02f1, 0x1838: 0x02f9, 0x1839: 0x03f1, 0x183a: 0x0309, 0x183b: 0x00a9,
+ 0x183c: 0x0311, 0x183d: 0x00b1, 0x183e: 0x0319, 0x183f: 0x0101,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0321, 0x1841: 0x0329, 0x1842: 0x0051, 0x1843: 0x0339, 0x1844: 0x0751, 0x1845: 0x00b9,
+ 0x1846: 0x0089, 0x1847: 0x0341, 0x1848: 0x0349, 0x1849: 0x0391, 0x184a: 0x00c1, 0x184b: 0x0109,
+ 0x184c: 0x00c9, 0x184d: 0x04b1, 0x184e: 0x0019, 0x184f: 0x02e9, 0x1850: 0x03d9, 0x1851: 0x02f1,
+ 0x1852: 0x02f9, 0x1853: 0x03f1, 0x1854: 0x0309, 0x1855: 0x0040, 0x1856: 0x0311, 0x1857: 0x00b1,
+ 0x1858: 0x0319, 0x1859: 0x0101, 0x185a: 0x0321, 0x185b: 0x0329, 0x185c: 0x0051, 0x185d: 0x0339,
+ 0x185e: 0x0751, 0x185f: 0x00b9, 0x1860: 0x0089, 0x1861: 0x0341, 0x1862: 0x0349, 0x1863: 0x0391,
+ 0x1864: 0x00c1, 0x1865: 0x0109, 0x1866: 0x00c9, 0x1867: 0x04b1, 0x1868: 0x0019, 0x1869: 0x02e9,
+ 0x186a: 0x03d9, 0x186b: 0x02f1, 0x186c: 0x02f9, 0x186d: 0x03f1, 0x186e: 0x0309, 0x186f: 0x00a9,
+ 0x1870: 0x0311, 0x1871: 0x00b1, 0x1872: 0x0319, 0x1873: 0x0101, 0x1874: 0x0321, 0x1875: 0x0329,
+ 0x1876: 0x0051, 0x1877: 0x0339, 0x1878: 0x0751, 0x1879: 0x00b9, 0x187a: 0x0089, 0x187b: 0x0341,
+ 0x187c: 0x0349, 0x187d: 0x0391, 0x187e: 0x00c1, 0x187f: 0x0109,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x00c9, 0x1881: 0x04b1, 0x1882: 0x0019, 0x1883: 0x02e9, 0x1884: 0x03d9, 0x1885: 0x02f1,
+ 0x1886: 0x02f9, 0x1887: 0x03f1, 0x1888: 0x0309, 0x1889: 0x00a9, 0x188a: 0x0311, 0x188b: 0x00b1,
+ 0x188c: 0x0319, 0x188d: 0x0101, 0x188e: 0x0321, 0x188f: 0x0329, 0x1890: 0x0051, 0x1891: 0x0339,
+ 0x1892: 0x0751, 0x1893: 0x00b9, 0x1894: 0x0089, 0x1895: 0x0341, 0x1896: 0x0349, 0x1897: 0x0391,
+ 0x1898: 0x00c1, 0x1899: 0x0109, 0x189a: 0x00c9, 0x189b: 0x04b1, 0x189c: 0x0019, 0x189d: 0x0040,
+ 0x189e: 0x03d9, 0x189f: 0x02f1, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0309, 0x18a3: 0x0040,
+ 0x18a4: 0x0040, 0x18a5: 0x00b1, 0x18a6: 0x0319, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0329,
+ 0x18aa: 0x0051, 0x18ab: 0x0339, 0x18ac: 0x0751, 0x18ad: 0x0040, 0x18ae: 0x0089, 0x18af: 0x0341,
+ 0x18b0: 0x0349, 0x18b1: 0x0391, 0x18b2: 0x00c1, 0x18b3: 0x0109, 0x18b4: 0x00c9, 0x18b5: 0x04b1,
+ 0x18b6: 0x0019, 0x18b7: 0x02e9, 0x18b8: 0x03d9, 0x18b9: 0x02f1, 0x18ba: 0x0040, 0x18bb: 0x03f1,
+ 0x18bc: 0x0040, 0x18bd: 0x00a9, 0x18be: 0x0311, 0x18bf: 0x00b1,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0319, 0x18c1: 0x0101, 0x18c2: 0x0321, 0x18c3: 0x0329, 0x18c4: 0x0040, 0x18c5: 0x0339,
+ 0x18c6: 0x0751, 0x18c7: 0x00b9, 0x18c8: 0x0089, 0x18c9: 0x0341, 0x18ca: 0x0349, 0x18cb: 0x0391,
+ 0x18cc: 0x00c1, 0x18cd: 0x0109, 0x18ce: 0x00c9, 0x18cf: 0x04b1, 0x18d0: 0x0019, 0x18d1: 0x02e9,
+ 0x18d2: 0x03d9, 0x18d3: 0x02f1, 0x18d4: 0x02f9, 0x18d5: 0x03f1, 0x18d6: 0x0309, 0x18d7: 0x00a9,
+ 0x18d8: 0x0311, 0x18d9: 0x00b1, 0x18da: 0x0319, 0x18db: 0x0101, 0x18dc: 0x0321, 0x18dd: 0x0329,
+ 0x18de: 0x0051, 0x18df: 0x0339, 0x18e0: 0x0751, 0x18e1: 0x00b9, 0x18e2: 0x0089, 0x18e3: 0x0341,
+ 0x18e4: 0x0349, 0x18e5: 0x0391, 0x18e6: 0x00c1, 0x18e7: 0x0109, 0x18e8: 0x00c9, 0x18e9: 0x04b1,
+ 0x18ea: 0x0019, 0x18eb: 0x02e9, 0x18ec: 0x03d9, 0x18ed: 0x02f1, 0x18ee: 0x02f9, 0x18ef: 0x03f1,
+ 0x18f0: 0x0309, 0x18f1: 0x00a9, 0x18f2: 0x0311, 0x18f3: 0x00b1, 0x18f4: 0x0319, 0x18f5: 0x0101,
+ 0x18f6: 0x0321, 0x18f7: 0x0329, 0x18f8: 0x0051, 0x18f9: 0x0339, 0x18fa: 0x0751, 0x18fb: 0x00b9,
+ 0x18fc: 0x0089, 0x18fd: 0x0341, 0x18fe: 0x0349, 0x18ff: 0x0391,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x00c1, 0x1901: 0x0109, 0x1902: 0x00c9, 0x1903: 0x04b1, 0x1904: 0x0019, 0x1905: 0x02e9,
+ 0x1906: 0x0040, 0x1907: 0x02f1, 0x1908: 0x02f9, 0x1909: 0x03f1, 0x190a: 0x0309, 0x190b: 0x0040,
+ 0x190c: 0x0040, 0x190d: 0x00b1, 0x190e: 0x0319, 0x190f: 0x0101, 0x1910: 0x0321, 0x1911: 0x0329,
+ 0x1912: 0x0051, 0x1913: 0x0339, 0x1914: 0x0751, 0x1915: 0x0040, 0x1916: 0x0089, 0x1917: 0x0341,
+ 0x1918: 0x0349, 0x1919: 0x0391, 0x191a: 0x00c1, 0x191b: 0x0109, 0x191c: 0x00c9, 0x191d: 0x0040,
+ 0x191e: 0x0019, 0x191f: 0x02e9, 0x1920: 0x03d9, 0x1921: 0x02f1, 0x1922: 0x02f9, 0x1923: 0x03f1,
+ 0x1924: 0x0309, 0x1925: 0x00a9, 0x1926: 0x0311, 0x1927: 0x00b1, 0x1928: 0x0319, 0x1929: 0x0101,
+ 0x192a: 0x0321, 0x192b: 0x0329, 0x192c: 0x0051, 0x192d: 0x0339, 0x192e: 0x0751, 0x192f: 0x00b9,
+ 0x1930: 0x0089, 0x1931: 0x0341, 0x1932: 0x0349, 0x1933: 0x0391, 0x1934: 0x00c1, 0x1935: 0x0109,
+ 0x1936: 0x00c9, 0x1937: 0x04b1, 0x1938: 0x0019, 0x1939: 0x02e9, 0x193a: 0x0040, 0x193b: 0x02f1,
+ 0x193c: 0x02f9, 0x193d: 0x03f1, 0x193e: 0x0309, 0x193f: 0x0040,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0311, 0x1941: 0x00b1, 0x1942: 0x0319, 0x1943: 0x0101, 0x1944: 0x0321, 0x1945: 0x0040,
+ 0x1946: 0x0051, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x0089, 0x194b: 0x0341,
+ 0x194c: 0x0349, 0x194d: 0x0391, 0x194e: 0x00c1, 0x194f: 0x0109, 0x1950: 0x00c9, 0x1951: 0x0040,
+ 0x1952: 0x0019, 0x1953: 0x02e9, 0x1954: 0x03d9, 0x1955: 0x02f1, 0x1956: 0x02f9, 0x1957: 0x03f1,
+ 0x1958: 0x0309, 0x1959: 0x00a9, 0x195a: 0x0311, 0x195b: 0x00b1, 0x195c: 0x0319, 0x195d: 0x0101,
+ 0x195e: 0x0321, 0x195f: 0x0329, 0x1960: 0x0051, 0x1961: 0x0339, 0x1962: 0x0751, 0x1963: 0x00b9,
+ 0x1964: 0x0089, 0x1965: 0x0341, 0x1966: 0x0349, 0x1967: 0x0391, 0x1968: 0x00c1, 0x1969: 0x0109,
+ 0x196a: 0x00c9, 0x196b: 0x04b1, 0x196c: 0x0019, 0x196d: 0x02e9, 0x196e: 0x03d9, 0x196f: 0x02f1,
+ 0x1970: 0x02f9, 0x1971: 0x03f1, 0x1972: 0x0309, 0x1973: 0x00a9, 0x1974: 0x0311, 0x1975: 0x00b1,
+ 0x1976: 0x0319, 0x1977: 0x0101, 0x1978: 0x0321, 0x1979: 0x0329, 0x197a: 0x0051, 0x197b: 0x0339,
+ 0x197c: 0x0751, 0x197d: 0x00b9, 0x197e: 0x0089, 0x197f: 0x0341,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0349, 0x1981: 0x0391, 0x1982: 0x00c1, 0x1983: 0x0109, 0x1984: 0x00c9, 0x1985: 0x04b1,
+ 0x1986: 0x0019, 0x1987: 0x02e9, 0x1988: 0x03d9, 0x1989: 0x02f1, 0x198a: 0x02f9, 0x198b: 0x03f1,
+ 0x198c: 0x0309, 0x198d: 0x00a9, 0x198e: 0x0311, 0x198f: 0x00b1, 0x1990: 0x0319, 0x1991: 0x0101,
+ 0x1992: 0x0321, 0x1993: 0x0329, 0x1994: 0x0051, 0x1995: 0x0339, 0x1996: 0x0751, 0x1997: 0x00b9,
+ 0x1998: 0x0089, 0x1999: 0x0341, 0x199a: 0x0349, 0x199b: 0x0391, 0x199c: 0x00c1, 0x199d: 0x0109,
+ 0x199e: 0x00c9, 0x199f: 0x04b1, 0x19a0: 0x0019, 0x19a1: 0x02e9, 0x19a2: 0x03d9, 0x19a3: 0x02f1,
+ 0x19a4: 0x02f9, 0x19a5: 0x03f1, 0x19a6: 0x0309, 0x19a7: 0x00a9, 0x19a8: 0x0311, 0x19a9: 0x00b1,
+ 0x19aa: 0x0319, 0x19ab: 0x0101, 0x19ac: 0x0321, 0x19ad: 0x0329, 0x19ae: 0x0051, 0x19af: 0x0339,
+ 0x19b0: 0x0751, 0x19b1: 0x00b9, 0x19b2: 0x0089, 0x19b3: 0x0341, 0x19b4: 0x0349, 0x19b5: 0x0391,
+ 0x19b6: 0x00c1, 0x19b7: 0x0109, 0x19b8: 0x00c9, 0x19b9: 0x04b1, 0x19ba: 0x0019, 0x19bb: 0x02e9,
+ 0x19bc: 0x03d9, 0x19bd: 0x02f1, 0x19be: 0x02f9, 0x19bf: 0x03f1,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0309, 0x19c1: 0x00a9, 0x19c2: 0x0311, 0x19c3: 0x00b1, 0x19c4: 0x0319, 0x19c5: 0x0101,
+ 0x19c6: 0x0321, 0x19c7: 0x0329, 0x19c8: 0x0051, 0x19c9: 0x0339, 0x19ca: 0x0751, 0x19cb: 0x00b9,
+ 0x19cc: 0x0089, 0x19cd: 0x0341, 0x19ce: 0x0349, 0x19cf: 0x0391, 0x19d0: 0x00c1, 0x19d1: 0x0109,
+ 0x19d2: 0x00c9, 0x19d3: 0x04b1, 0x19d4: 0x0019, 0x19d5: 0x02e9, 0x19d6: 0x03d9, 0x19d7: 0x02f1,
+ 0x19d8: 0x02f9, 0x19d9: 0x03f1, 0x19da: 0x0309, 0x19db: 0x00a9, 0x19dc: 0x0311, 0x19dd: 0x00b1,
+ 0x19de: 0x0319, 0x19df: 0x0101, 0x19e0: 0x0321, 0x19e1: 0x0329, 0x19e2: 0x0051, 0x19e3: 0x0339,
+ 0x19e4: 0x0751, 0x19e5: 0x00b9, 0x19e6: 0x0089, 0x19e7: 0x0341, 0x19e8: 0x0349, 0x19e9: 0x0391,
+ 0x19ea: 0x00c1, 0x19eb: 0x0109, 0x19ec: 0x00c9, 0x19ed: 0x04b1, 0x19ee: 0x0019, 0x19ef: 0x02e9,
+ 0x19f0: 0x03d9, 0x19f1: 0x02f1, 0x19f2: 0x02f9, 0x19f3: 0x03f1, 0x19f4: 0x0309, 0x19f5: 0x00a9,
+ 0x19f6: 0x0311, 0x19f7: 0x00b1, 0x19f8: 0x0319, 0x19f9: 0x0101, 0x19fa: 0x0321, 0x19fb: 0x0329,
+ 0x19fc: 0x0051, 0x19fd: 0x0339, 0x19fe: 0x0751, 0x19ff: 0x00b9,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0089, 0x1a01: 0x0341, 0x1a02: 0x0349, 0x1a03: 0x0391, 0x1a04: 0x00c1, 0x1a05: 0x0109,
+ 0x1a06: 0x00c9, 0x1a07: 0x04b1, 0x1a08: 0x0019, 0x1a09: 0x02e9, 0x1a0a: 0x03d9, 0x1a0b: 0x02f1,
+ 0x1a0c: 0x02f9, 0x1a0d: 0x03f1, 0x1a0e: 0x0309, 0x1a0f: 0x00a9, 0x1a10: 0x0311, 0x1a11: 0x00b1,
+ 0x1a12: 0x0319, 0x1a13: 0x0101, 0x1a14: 0x0321, 0x1a15: 0x0329, 0x1a16: 0x0051, 0x1a17: 0x0339,
+ 0x1a18: 0x0751, 0x1a19: 0x00b9, 0x1a1a: 0x0089, 0x1a1b: 0x0341, 0x1a1c: 0x0349, 0x1a1d: 0x0391,
+ 0x1a1e: 0x00c1, 0x1a1f: 0x0109, 0x1a20: 0x00c9, 0x1a21: 0x04b1, 0x1a22: 0x0019, 0x1a23: 0x02e9,
+ 0x1a24: 0x03d9, 0x1a25: 0x02f1, 0x1a26: 0x02f9, 0x1a27: 0x03f1, 0x1a28: 0x0309, 0x1a29: 0x00a9,
+ 0x1a2a: 0x0311, 0x1a2b: 0x00b1, 0x1a2c: 0x0319, 0x1a2d: 0x0101, 0x1a2e: 0x0321, 0x1a2f: 0x0329,
+ 0x1a30: 0x0051, 0x1a31: 0x0339, 0x1a32: 0x0751, 0x1a33: 0x00b9, 0x1a34: 0x0089, 0x1a35: 0x0341,
+ 0x1a36: 0x0349, 0x1a37: 0x0391, 0x1a38: 0x00c1, 0x1a39: 0x0109, 0x1a3a: 0x00c9, 0x1a3b: 0x04b1,
+ 0x1a3c: 0x0019, 0x1a3d: 0x02e9, 0x1a3e: 0x03d9, 0x1a3f: 0x02f1,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x02f9, 0x1a41: 0x03f1, 0x1a42: 0x0309, 0x1a43: 0x00a9, 0x1a44: 0x0311, 0x1a45: 0x00b1,
+ 0x1a46: 0x0319, 0x1a47: 0x0101, 0x1a48: 0x0321, 0x1a49: 0x0329, 0x1a4a: 0x0051, 0x1a4b: 0x0339,
+ 0x1a4c: 0x0751, 0x1a4d: 0x00b9, 0x1a4e: 0x0089, 0x1a4f: 0x0341, 0x1a50: 0x0349, 0x1a51: 0x0391,
+ 0x1a52: 0x00c1, 0x1a53: 0x0109, 0x1a54: 0x00c9, 0x1a55: 0x04b1, 0x1a56: 0x0019, 0x1a57: 0x02e9,
+ 0x1a58: 0x03d9, 0x1a59: 0x02f1, 0x1a5a: 0x02f9, 0x1a5b: 0x03f1, 0x1a5c: 0x0309, 0x1a5d: 0x00a9,
+ 0x1a5e: 0x0311, 0x1a5f: 0x00b1, 0x1a60: 0x0319, 0x1a61: 0x0101, 0x1a62: 0x0321, 0x1a63: 0x0329,
+ 0x1a64: 0x0051, 0x1a65: 0x0339, 0x1a66: 0x0751, 0x1a67: 0x00b9, 0x1a68: 0x0089, 0x1a69: 0x0341,
+ 0x1a6a: 0x0349, 0x1a6b: 0x0391, 0x1a6c: 0x00c1, 0x1a6d: 0x0109, 0x1a6e: 0x00c9, 0x1a6f: 0x04b1,
+ 0x1a70: 0x0019, 0x1a71: 0x02e9, 0x1a72: 0x03d9, 0x1a73: 0x02f1, 0x1a74: 0x02f9, 0x1a75: 0x03f1,
+ 0x1a76: 0x0309, 0x1a77: 0x00a9, 0x1a78: 0x0311, 0x1a79: 0x00b1, 0x1a7a: 0x0319, 0x1a7b: 0x0101,
+ 0x1a7c: 0x0321, 0x1a7d: 0x0329, 0x1a7e: 0x0051, 0x1a7f: 0x0339,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0751, 0x1a81: 0x00b9, 0x1a82: 0x0089, 0x1a83: 0x0341, 0x1a84: 0x0349, 0x1a85: 0x0391,
+ 0x1a86: 0x00c1, 0x1a87: 0x0109, 0x1a88: 0x00c9, 0x1a89: 0x04b1, 0x1a8a: 0x0019, 0x1a8b: 0x02e9,
+ 0x1a8c: 0x03d9, 0x1a8d: 0x02f1, 0x1a8e: 0x02f9, 0x1a8f: 0x03f1, 0x1a90: 0x0309, 0x1a91: 0x00a9,
+ 0x1a92: 0x0311, 0x1a93: 0x00b1, 0x1a94: 0x0319, 0x1a95: 0x0101, 0x1a96: 0x0321, 0x1a97: 0x0329,
+ 0x1a98: 0x0051, 0x1a99: 0x0339, 0x1a9a: 0x0751, 0x1a9b: 0x00b9, 0x1a9c: 0x0089, 0x1a9d: 0x0341,
+ 0x1a9e: 0x0349, 0x1a9f: 0x0391, 0x1aa0: 0x00c1, 0x1aa1: 0x0109, 0x1aa2: 0x00c9, 0x1aa3: 0x04b1,
+ 0x1aa4: 0x2279, 0x1aa5: 0x2281, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0x2289, 0x1aa9: 0x0399,
+ 0x1aaa: 0x03a1, 0x1aab: 0x03a9, 0x1aac: 0x2291, 0x1aad: 0x2299, 0x1aae: 0x22a1, 0x1aaf: 0x04d1,
+ 0x1ab0: 0x05f9, 0x1ab1: 0x22a9, 0x1ab2: 0x22b1, 0x1ab3: 0x22b9, 0x1ab4: 0x22c1, 0x1ab5: 0x22c9,
+ 0x1ab6: 0x22d1, 0x1ab7: 0x0799, 0x1ab8: 0x03c1, 0x1ab9: 0x04d1, 0x1aba: 0x22d9, 0x1abb: 0x22e1,
+ 0x1abc: 0x22e9, 0x1abd: 0x03b1, 0x1abe: 0x03b9, 0x1abf: 0x22f1,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x0769, 0x1ac1: 0x22f9, 0x1ac2: 0x2289, 0x1ac3: 0x0399, 0x1ac4: 0x03a1, 0x1ac5: 0x03a9,
+ 0x1ac6: 0x2291, 0x1ac7: 0x2299, 0x1ac8: 0x22a1, 0x1ac9: 0x04d1, 0x1aca: 0x05f9, 0x1acb: 0x22a9,
+ 0x1acc: 0x22b1, 0x1acd: 0x22b9, 0x1ace: 0x22c1, 0x1acf: 0x22c9, 0x1ad0: 0x22d1, 0x1ad1: 0x0799,
+ 0x1ad2: 0x03c1, 0x1ad3: 0x22d9, 0x1ad4: 0x22d9, 0x1ad5: 0x22e1, 0x1ad6: 0x22e9, 0x1ad7: 0x03b1,
+ 0x1ad8: 0x03b9, 0x1ad9: 0x22f1, 0x1ada: 0x0769, 0x1adb: 0x2301, 0x1adc: 0x2291, 0x1add: 0x04d1,
+ 0x1ade: 0x22a9, 0x1adf: 0x03b1, 0x1ae0: 0x03c1, 0x1ae1: 0x0799, 0x1ae2: 0x2289, 0x1ae3: 0x0399,
+ 0x1ae4: 0x03a1, 0x1ae5: 0x03a9, 0x1ae6: 0x2291, 0x1ae7: 0x2299, 0x1ae8: 0x22a1, 0x1ae9: 0x04d1,
+ 0x1aea: 0x05f9, 0x1aeb: 0x22a9, 0x1aec: 0x22b1, 0x1aed: 0x22b9, 0x1aee: 0x22c1, 0x1aef: 0x22c9,
+ 0x1af0: 0x22d1, 0x1af1: 0x0799, 0x1af2: 0x03c1, 0x1af3: 0x04d1, 0x1af4: 0x22d9, 0x1af5: 0x22e1,
+ 0x1af6: 0x22e9, 0x1af7: 0x03b1, 0x1af8: 0x03b9, 0x1af9: 0x22f1, 0x1afa: 0x0769, 0x1afb: 0x22f9,
+ 0x1afc: 0x2289, 0x1afd: 0x0399, 0x1afe: 0x03a1, 0x1aff: 0x03a9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2291, 0x1b01: 0x2299, 0x1b02: 0x22a1, 0x1b03: 0x04d1, 0x1b04: 0x05f9, 0x1b05: 0x22a9,
+ 0x1b06: 0x22b1, 0x1b07: 0x22b9, 0x1b08: 0x22c1, 0x1b09: 0x22c9, 0x1b0a: 0x22d1, 0x1b0b: 0x0799,
+ 0x1b0c: 0x03c1, 0x1b0d: 0x22d9, 0x1b0e: 0x22d9, 0x1b0f: 0x22e1, 0x1b10: 0x22e9, 0x1b11: 0x03b1,
+ 0x1b12: 0x03b9, 0x1b13: 0x22f1, 0x1b14: 0x0769, 0x1b15: 0x2301, 0x1b16: 0x2291, 0x1b17: 0x04d1,
+ 0x1b18: 0x22a9, 0x1b19: 0x03b1, 0x1b1a: 0x03c1, 0x1b1b: 0x0799, 0x1b1c: 0x2289, 0x1b1d: 0x0399,
+ 0x1b1e: 0x03a1, 0x1b1f: 0x03a9, 0x1b20: 0x2291, 0x1b21: 0x2299, 0x1b22: 0x22a1, 0x1b23: 0x04d1,
+ 0x1b24: 0x05f9, 0x1b25: 0x22a9, 0x1b26: 0x22b1, 0x1b27: 0x22b9, 0x1b28: 0x22c1, 0x1b29: 0x22c9,
+ 0x1b2a: 0x22d1, 0x1b2b: 0x0799, 0x1b2c: 0x03c1, 0x1b2d: 0x04d1, 0x1b2e: 0x22d9, 0x1b2f: 0x22e1,
+ 0x1b30: 0x22e9, 0x1b31: 0x03b1, 0x1b32: 0x03b9, 0x1b33: 0x22f1, 0x1b34: 0x0769, 0x1b35: 0x22f9,
+ 0x1b36: 0x2289, 0x1b37: 0x0399, 0x1b38: 0x03a1, 0x1b39: 0x03a9, 0x1b3a: 0x2291, 0x1b3b: 0x2299,
+ 0x1b3c: 0x22a1, 0x1b3d: 0x04d1, 0x1b3e: 0x05f9, 0x1b3f: 0x22a9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0x22b1, 0x1b41: 0x22b9, 0x1b42: 0x22c1, 0x1b43: 0x22c9, 0x1b44: 0x22d1, 0x1b45: 0x0799,
+ 0x1b46: 0x03c1, 0x1b47: 0x22d9, 0x1b48: 0x22d9, 0x1b49: 0x22e1, 0x1b4a: 0x22e9, 0x1b4b: 0x03b1,
+ 0x1b4c: 0x03b9, 0x1b4d: 0x22f1, 0x1b4e: 0x0769, 0x1b4f: 0x2301, 0x1b50: 0x2291, 0x1b51: 0x04d1,
+ 0x1b52: 0x22a9, 0x1b53: 0x03b1, 0x1b54: 0x03c1, 0x1b55: 0x0799, 0x1b56: 0x2289, 0x1b57: 0x0399,
+ 0x1b58: 0x03a1, 0x1b59: 0x03a9, 0x1b5a: 0x2291, 0x1b5b: 0x2299, 0x1b5c: 0x22a1, 0x1b5d: 0x04d1,
+ 0x1b5e: 0x05f9, 0x1b5f: 0x22a9, 0x1b60: 0x22b1, 0x1b61: 0x22b9, 0x1b62: 0x22c1, 0x1b63: 0x22c9,
+ 0x1b64: 0x22d1, 0x1b65: 0x0799, 0x1b66: 0x03c1, 0x1b67: 0x04d1, 0x1b68: 0x22d9, 0x1b69: 0x22e1,
+ 0x1b6a: 0x22e9, 0x1b6b: 0x03b1, 0x1b6c: 0x03b9, 0x1b6d: 0x22f1, 0x1b6e: 0x0769, 0x1b6f: 0x22f9,
+ 0x1b70: 0x2289, 0x1b71: 0x0399, 0x1b72: 0x03a1, 0x1b73: 0x03a9, 0x1b74: 0x2291, 0x1b75: 0x2299,
+ 0x1b76: 0x22a1, 0x1b77: 0x04d1, 0x1b78: 0x05f9, 0x1b79: 0x22a9, 0x1b7a: 0x22b1, 0x1b7b: 0x22b9,
+ 0x1b7c: 0x22c1, 0x1b7d: 0x22c9, 0x1b7e: 0x22d1, 0x1b7f: 0x0799,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x03c1, 0x1b81: 0x22d9, 0x1b82: 0x22d9, 0x1b83: 0x22e1, 0x1b84: 0x22e9, 0x1b85: 0x03b1,
+ 0x1b86: 0x03b9, 0x1b87: 0x22f1, 0x1b88: 0x0769, 0x1b89: 0x2301, 0x1b8a: 0x2291, 0x1b8b: 0x04d1,
+ 0x1b8c: 0x22a9, 0x1b8d: 0x03b1, 0x1b8e: 0x03c1, 0x1b8f: 0x0799, 0x1b90: 0x2289, 0x1b91: 0x0399,
+ 0x1b92: 0x03a1, 0x1b93: 0x03a9, 0x1b94: 0x2291, 0x1b95: 0x2299, 0x1b96: 0x22a1, 0x1b97: 0x04d1,
+ 0x1b98: 0x05f9, 0x1b99: 0x22a9, 0x1b9a: 0x22b1, 0x1b9b: 0x22b9, 0x1b9c: 0x22c1, 0x1b9d: 0x22c9,
+ 0x1b9e: 0x22d1, 0x1b9f: 0x0799, 0x1ba0: 0x03c1, 0x1ba1: 0x04d1, 0x1ba2: 0x22d9, 0x1ba3: 0x22e1,
+ 0x1ba4: 0x22e9, 0x1ba5: 0x03b1, 0x1ba6: 0x03b9, 0x1ba7: 0x22f1, 0x1ba8: 0x0769, 0x1ba9: 0x22f9,
+ 0x1baa: 0x2289, 0x1bab: 0x0399, 0x1bac: 0x03a1, 0x1bad: 0x03a9, 0x1bae: 0x2291, 0x1baf: 0x2299,
+ 0x1bb0: 0x22a1, 0x1bb1: 0x04d1, 0x1bb2: 0x05f9, 0x1bb3: 0x22a9, 0x1bb4: 0x22b1, 0x1bb5: 0x22b9,
+ 0x1bb6: 0x22c1, 0x1bb7: 0x22c9, 0x1bb8: 0x22d1, 0x1bb9: 0x0799, 0x1bba: 0x03c1, 0x1bbb: 0x22d9,
+ 0x1bbc: 0x22d9, 0x1bbd: 0x22e1, 0x1bbe: 0x22e9, 0x1bbf: 0x03b1,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x03b9, 0x1bc1: 0x22f1, 0x1bc2: 0x0769, 0x1bc3: 0x2301, 0x1bc4: 0x2291, 0x1bc5: 0x04d1,
+ 0x1bc6: 0x22a9, 0x1bc7: 0x03b1, 0x1bc8: 0x03c1, 0x1bc9: 0x0799, 0x1bca: 0x2309, 0x1bcb: 0x2309,
+ 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x06e1, 0x1bcf: 0x0049, 0x1bd0: 0x0029, 0x1bd1: 0x0031,
+ 0x1bd2: 0x06e9, 0x1bd3: 0x06f1, 0x1bd4: 0x06f9, 0x1bd5: 0x0701, 0x1bd6: 0x0709, 0x1bd7: 0x0711,
+ 0x1bd8: 0x06e1, 0x1bd9: 0x0049, 0x1bda: 0x0029, 0x1bdb: 0x0031, 0x1bdc: 0x06e9, 0x1bdd: 0x06f1,
+ 0x1bde: 0x06f9, 0x1bdf: 0x0701, 0x1be0: 0x0709, 0x1be1: 0x0711, 0x1be2: 0x06e1, 0x1be3: 0x0049,
+ 0x1be4: 0x0029, 0x1be5: 0x0031, 0x1be6: 0x06e9, 0x1be7: 0x06f1, 0x1be8: 0x06f9, 0x1be9: 0x0701,
+ 0x1bea: 0x0709, 0x1beb: 0x0711, 0x1bec: 0x06e1, 0x1bed: 0x0049, 0x1bee: 0x0029, 0x1bef: 0x0031,
+ 0x1bf0: 0x06e9, 0x1bf1: 0x06f1, 0x1bf2: 0x06f9, 0x1bf3: 0x0701, 0x1bf4: 0x0709, 0x1bf5: 0x0711,
+ 0x1bf6: 0x06e1, 0x1bf7: 0x0049, 0x1bf8: 0x0029, 0x1bf9: 0x0031, 0x1bfa: 0x06e9, 0x1bfb: 0x06f1,
+ 0x1bfc: 0x06f9, 0x1bfd: 0x0701, 0x1bfe: 0x0709, 0x1bff: 0x0711,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115,
+ 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135,
+ 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115,
+ 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175,
+ 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115,
+ 0x1c1e: 0x8b3d, 0x1c1f: 0x8b3d, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08,
+ 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08,
+ 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08,
+ 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08,
+ 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08,
+ 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0x20b1, 0x1c41: 0x20b9, 0x1c42: 0x20d9, 0x1c43: 0x20f1, 0x1c44: 0x0040, 0x1c45: 0x2189,
+ 0x1c46: 0x2109, 0x1c47: 0x20e1, 0x1c48: 0x2131, 0x1c49: 0x2191, 0x1c4a: 0x2161, 0x1c4b: 0x2169,
+ 0x1c4c: 0x2171, 0x1c4d: 0x2179, 0x1c4e: 0x2111, 0x1c4f: 0x2141, 0x1c50: 0x2151, 0x1c51: 0x2121,
+ 0x1c52: 0x2159, 0x1c53: 0x2101, 0x1c54: 0x2119, 0x1c55: 0x20c9, 0x1c56: 0x20d1, 0x1c57: 0x20e9,
+ 0x1c58: 0x20f9, 0x1c59: 0x2129, 0x1c5a: 0x2139, 0x1c5b: 0x2149, 0x1c5c: 0x2311, 0x1c5d: 0x1689,
+ 0x1c5e: 0x2319, 0x1c5f: 0x2321, 0x1c60: 0x0040, 0x1c61: 0x20b9, 0x1c62: 0x20d9, 0x1c63: 0x0040,
+ 0x1c64: 0x2181, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0x20e1, 0x1c68: 0x0040, 0x1c69: 0x2191,
+ 0x1c6a: 0x2161, 0x1c6b: 0x2169, 0x1c6c: 0x2171, 0x1c6d: 0x2179, 0x1c6e: 0x2111, 0x1c6f: 0x2141,
+ 0x1c70: 0x2151, 0x1c71: 0x2121, 0x1c72: 0x2159, 0x1c73: 0x0040, 0x1c74: 0x2119, 0x1c75: 0x20c9,
+ 0x1c76: 0x20d1, 0x1c77: 0x20e9, 0x1c78: 0x0040, 0x1c79: 0x2129, 0x1c7a: 0x0040, 0x1c7b: 0x2149,
+ 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0x20d9, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040,
+ 0x1c86: 0x0040, 0x1c87: 0x20e1, 0x1c88: 0x0040, 0x1c89: 0x2191, 0x1c8a: 0x0040, 0x1c8b: 0x2169,
+ 0x1c8c: 0x0040, 0x1c8d: 0x2179, 0x1c8e: 0x2111, 0x1c8f: 0x2141, 0x1c90: 0x0040, 0x1c91: 0x2121,
+ 0x1c92: 0x2159, 0x1c93: 0x0040, 0x1c94: 0x2119, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0x20e9,
+ 0x1c98: 0x0040, 0x1c99: 0x2129, 0x1c9a: 0x0040, 0x1c9b: 0x2149, 0x1c9c: 0x0040, 0x1c9d: 0x1689,
+ 0x1c9e: 0x0040, 0x1c9f: 0x2321, 0x1ca0: 0x0040, 0x1ca1: 0x20b9, 0x1ca2: 0x20d9, 0x1ca3: 0x0040,
+ 0x1ca4: 0x2181, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0x20e1, 0x1ca8: 0x2131, 0x1ca9: 0x2191,
+ 0x1caa: 0x2161, 0x1cab: 0x0040, 0x1cac: 0x2171, 0x1cad: 0x2179, 0x1cae: 0x2111, 0x1caf: 0x2141,
+ 0x1cb0: 0x2151, 0x1cb1: 0x2121, 0x1cb2: 0x2159, 0x1cb3: 0x0040, 0x1cb4: 0x2119, 0x1cb5: 0x20c9,
+ 0x1cb6: 0x20d1, 0x1cb7: 0x20e9, 0x1cb8: 0x0040, 0x1cb9: 0x2129, 0x1cba: 0x2139, 0x1cbb: 0x2149,
+ 0x1cbc: 0x2311, 0x1cbd: 0x0040, 0x1cbe: 0x2319, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x20b1, 0x1cc1: 0x20b9, 0x1cc2: 0x20d9, 0x1cc3: 0x20f1, 0x1cc4: 0x2181, 0x1cc5: 0x2189,
+ 0x1cc6: 0x2109, 0x1cc7: 0x20e1, 0x1cc8: 0x2131, 0x1cc9: 0x2191, 0x1cca: 0x0040, 0x1ccb: 0x2169,
+ 0x1ccc: 0x2171, 0x1ccd: 0x2179, 0x1cce: 0x2111, 0x1ccf: 0x2141, 0x1cd0: 0x2151, 0x1cd1: 0x2121,
+ 0x1cd2: 0x2159, 0x1cd3: 0x2101, 0x1cd4: 0x2119, 0x1cd5: 0x20c9, 0x1cd6: 0x20d1, 0x1cd7: 0x20e9,
+ 0x1cd8: 0x20f9, 0x1cd9: 0x2129, 0x1cda: 0x2139, 0x1cdb: 0x2149, 0x1cdc: 0x0040, 0x1cdd: 0x0040,
+ 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0x20b9, 0x1ce2: 0x20d9, 0x1ce3: 0x20f1,
+ 0x1ce4: 0x0040, 0x1ce5: 0x2189, 0x1ce6: 0x2109, 0x1ce7: 0x20e1, 0x1ce8: 0x2131, 0x1ce9: 0x2191,
+ 0x1cea: 0x0040, 0x1ceb: 0x2169, 0x1cec: 0x2171, 0x1ced: 0x2179, 0x1cee: 0x2111, 0x1cef: 0x2141,
+ 0x1cf0: 0x2151, 0x1cf1: 0x2121, 0x1cf2: 0x2159, 0x1cf3: 0x2101, 0x1cf4: 0x2119, 0x1cf5: 0x20c9,
+ 0x1cf6: 0x20d1, 0x1cf7: 0x20e9, 0x1cf8: 0x20f9, 0x1cf9: 0x2129, 0x1cfa: 0x2139, 0x1cfb: 0x2149,
+ 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0x0040, 0x1d01: 0x232a, 0x1d02: 0x2332, 0x1d03: 0x233a, 0x1d04: 0x2342, 0x1d05: 0x234a,
+ 0x1d06: 0x2352, 0x1d07: 0x235a, 0x1d08: 0x2362, 0x1d09: 0x236a, 0x1d0a: 0x2372, 0x1d0b: 0x0018,
+ 0x1d0c: 0x0018, 0x1d0d: 0x0018, 0x1d0e: 0x0018, 0x1d0f: 0x0018, 0x1d10: 0x237a, 0x1d11: 0x2382,
+ 0x1d12: 0x238a, 0x1d13: 0x2392, 0x1d14: 0x239a, 0x1d15: 0x23a2, 0x1d16: 0x23aa, 0x1d17: 0x23b2,
+ 0x1d18: 0x23ba, 0x1d19: 0x23c2, 0x1d1a: 0x23ca, 0x1d1b: 0x23d2, 0x1d1c: 0x23da, 0x1d1d: 0x23e2,
+ 0x1d1e: 0x23ea, 0x1d1f: 0x23f2, 0x1d20: 0x23fa, 0x1d21: 0x2402, 0x1d22: 0x240a, 0x1d23: 0x2412,
+ 0x1d24: 0x241a, 0x1d25: 0x2422, 0x1d26: 0x242a, 0x1d27: 0x2432, 0x1d28: 0x243a, 0x1d29: 0x2442,
+ 0x1d2a: 0x2449, 0x1d2b: 0x03d9, 0x1d2c: 0x00b9, 0x1d2d: 0x1239, 0x1d2e: 0x2451, 0x1d2f: 0x0018,
+ 0x1d30: 0x0019, 0x1d31: 0x02e9, 0x1d32: 0x03d9, 0x1d33: 0x02f1, 0x1d34: 0x02f9, 0x1d35: 0x03f1,
+ 0x1d36: 0x0309, 0x1d37: 0x00a9, 0x1d38: 0x0311, 0x1d39: 0x00b1, 0x1d3a: 0x0319, 0x1d3b: 0x0101,
+ 0x1d3c: 0x0321, 0x1d3d: 0x0329, 0x1d3e: 0x0051, 0x1d3f: 0x0339,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0751, 0x1d41: 0x00b9, 0x1d42: 0x0089, 0x1d43: 0x0341, 0x1d44: 0x0349, 0x1d45: 0x0391,
+ 0x1d46: 0x00c1, 0x1d47: 0x0109, 0x1d48: 0x00c9, 0x1d49: 0x04b1, 0x1d4a: 0x2459, 0x1d4b: 0x11f9,
+ 0x1d4c: 0x2461, 0x1d4d: 0x04d9, 0x1d4e: 0x2469, 0x1d4f: 0x2471, 0x1d50: 0x0018, 0x1d51: 0x0018,
+ 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018,
+ 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018,
+ 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018,
+ 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018,
+ 0x1d6a: 0x2479, 0x1d6b: 0x2481, 0x1d6c: 0x2489, 0x1d6d: 0x0018, 0x1d6e: 0x0018, 0x1d6f: 0x0018,
+ 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018,
+ 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018,
+ 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2499, 0x1d81: 0x24a1, 0x1d82: 0x24a9, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040,
+ 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040,
+ 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0x24b1, 0x1d91: 0x24b9,
+ 0x1d92: 0x24c1, 0x1d93: 0x24c9, 0x1d94: 0x24d1, 0x1d95: 0x24d9, 0x1d96: 0x24e1, 0x1d97: 0x24e9,
+ 0x1d98: 0x24f1, 0x1d99: 0x24f9, 0x1d9a: 0x2501, 0x1d9b: 0x2509, 0x1d9c: 0x2511, 0x1d9d: 0x2519,
+ 0x1d9e: 0x2521, 0x1d9f: 0x2529, 0x1da0: 0x2531, 0x1da1: 0x2539, 0x1da2: 0x2541, 0x1da3: 0x2549,
+ 0x1da4: 0x2551, 0x1da5: 0x2559, 0x1da6: 0x2561, 0x1da7: 0x2569, 0x1da8: 0x2571, 0x1da9: 0x2579,
+ 0x1daa: 0x2581, 0x1dab: 0x2589, 0x1dac: 0x2591, 0x1dad: 0x2599, 0x1dae: 0x25a1, 0x1daf: 0x25a9,
+ 0x1db0: 0x25b1, 0x1db1: 0x25b9, 0x1db2: 0x25c1, 0x1db3: 0x25c9, 0x1db4: 0x25d1, 0x1db5: 0x25d9,
+ 0x1db6: 0x25e1, 0x1db7: 0x25e9, 0x1db8: 0x25f1, 0x1db9: 0x25f9, 0x1dba: 0x2601, 0x1dbb: 0x2609,
+ 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0x2669, 0x1dc1: 0x2671, 0x1dc2: 0x2679, 0x1dc3: 0x8b55, 0x1dc4: 0x2681, 0x1dc5: 0x2689,
+ 0x1dc6: 0x2691, 0x1dc7: 0x2699, 0x1dc8: 0x26a1, 0x1dc9: 0x26a9, 0x1dca: 0x26b1, 0x1dcb: 0x26b9,
+ 0x1dcc: 0x26c1, 0x1dcd: 0x8b75, 0x1dce: 0x26c9, 0x1dcf: 0x26d1, 0x1dd0: 0x26d9, 0x1dd1: 0x26e1,
+ 0x1dd2: 0x8b95, 0x1dd3: 0x26e9, 0x1dd4: 0x26f1, 0x1dd5: 0x2521, 0x1dd6: 0x8bb5, 0x1dd7: 0x26f9,
+ 0x1dd8: 0x2701, 0x1dd9: 0x2709, 0x1dda: 0x2711, 0x1ddb: 0x2719, 0x1ddc: 0x8bd5, 0x1ddd: 0x2721,
+ 0x1dde: 0x2729, 0x1ddf: 0x2731, 0x1de0: 0x2739, 0x1de1: 0x2741, 0x1de2: 0x25f9, 0x1de3: 0x2749,
+ 0x1de4: 0x2751, 0x1de5: 0x2759, 0x1de6: 0x2761, 0x1de7: 0x2769, 0x1de8: 0x2771, 0x1de9: 0x2779,
+ 0x1dea: 0x2781, 0x1deb: 0x2789, 0x1dec: 0x2791, 0x1ded: 0x2799, 0x1dee: 0x27a1, 0x1def: 0x27a9,
+ 0x1df0: 0x27b1, 0x1df1: 0x27b9, 0x1df2: 0x27b9, 0x1df3: 0x27b9, 0x1df4: 0x8bf5, 0x1df5: 0x27c1,
+ 0x1df6: 0x27c9, 0x1df7: 0x27d1, 0x1df8: 0x8c15, 0x1df9: 0x27d9, 0x1dfa: 0x27e1, 0x1dfb: 0x27e9,
+ 0x1dfc: 0x27f1, 0x1dfd: 0x27f9, 0x1dfe: 0x2801, 0x1dff: 0x2809,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0x2811, 0x1e01: 0x2819, 0x1e02: 0x2821, 0x1e03: 0x2829, 0x1e04: 0x2831, 0x1e05: 0x2839,
+ 0x1e06: 0x2839, 0x1e07: 0x2841, 0x1e08: 0x2849, 0x1e09: 0x2851, 0x1e0a: 0x2859, 0x1e0b: 0x2861,
+ 0x1e0c: 0x2869, 0x1e0d: 0x2871, 0x1e0e: 0x2879, 0x1e0f: 0x2881, 0x1e10: 0x2889, 0x1e11: 0x2891,
+ 0x1e12: 0x2899, 0x1e13: 0x28a1, 0x1e14: 0x28a9, 0x1e15: 0x28b1, 0x1e16: 0x28b9, 0x1e17: 0x28c1,
+ 0x1e18: 0x28c9, 0x1e19: 0x8c35, 0x1e1a: 0x28d1, 0x1e1b: 0x28d9, 0x1e1c: 0x28e1, 0x1e1d: 0x24d9,
+ 0x1e1e: 0x28e9, 0x1e1f: 0x28f1, 0x1e20: 0x8c55, 0x1e21: 0x8c75, 0x1e22: 0x28f9, 0x1e23: 0x2901,
+ 0x1e24: 0x2909, 0x1e25: 0x2911, 0x1e26: 0x2919, 0x1e27: 0x2921, 0x1e28: 0x2040, 0x1e29: 0x2929,
+ 0x1e2a: 0x2931, 0x1e2b: 0x2931, 0x1e2c: 0x8c95, 0x1e2d: 0x2939, 0x1e2e: 0x2941, 0x1e2f: 0x2949,
+ 0x1e30: 0x2951, 0x1e31: 0x8cb5, 0x1e32: 0x2959, 0x1e33: 0x2961, 0x1e34: 0x2040, 0x1e35: 0x2969,
+ 0x1e36: 0x2971, 0x1e37: 0x2979, 0x1e38: 0x2981, 0x1e39: 0x2989, 0x1e3a: 0x2991, 0x1e3b: 0x8cd5,
+ 0x1e3c: 0x2999, 0x1e3d: 0x8cf5, 0x1e3e: 0x29a1, 0x1e3f: 0x29a9,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0x29b1, 0x1e41: 0x29b9, 0x1e42: 0x29c1, 0x1e43: 0x29c9, 0x1e44: 0x29d1, 0x1e45: 0x29d9,
+ 0x1e46: 0x29e1, 0x1e47: 0x29e9, 0x1e48: 0x29f1, 0x1e49: 0x8d15, 0x1e4a: 0x29f9, 0x1e4b: 0x2a01,
+ 0x1e4c: 0x2a09, 0x1e4d: 0x2a11, 0x1e4e: 0x2a19, 0x1e4f: 0x8d35, 0x1e50: 0x2a21, 0x1e51: 0x8d55,
+ 0x1e52: 0x8d75, 0x1e53: 0x2a29, 0x1e54: 0x2a31, 0x1e55: 0x2a31, 0x1e56: 0x2a39, 0x1e57: 0x8d95,
+ 0x1e58: 0x8db5, 0x1e59: 0x2a41, 0x1e5a: 0x2a49, 0x1e5b: 0x2a51, 0x1e5c: 0x2a59, 0x1e5d: 0x2a61,
+ 0x1e5e: 0x2a69, 0x1e5f: 0x2a71, 0x1e60: 0x2a79, 0x1e61: 0x2a81, 0x1e62: 0x2a89, 0x1e63: 0x2a91,
+ 0x1e64: 0x8dd5, 0x1e65: 0x2a99, 0x1e66: 0x2aa1, 0x1e67: 0x2aa9, 0x1e68: 0x2ab1, 0x1e69: 0x2aa9,
+ 0x1e6a: 0x2ab9, 0x1e6b: 0x2ac1, 0x1e6c: 0x2ac9, 0x1e6d: 0x2ad1, 0x1e6e: 0x2ad9, 0x1e6f: 0x2ae1,
+ 0x1e70: 0x2ae9, 0x1e71: 0x2af1, 0x1e72: 0x2af9, 0x1e73: 0x2b01, 0x1e74: 0x2b09, 0x1e75: 0x2b11,
+ 0x1e76: 0x2b19, 0x1e77: 0x2b21, 0x1e78: 0x8df5, 0x1e79: 0x2b29, 0x1e7a: 0x2b31, 0x1e7b: 0x2b39,
+ 0x1e7c: 0x2b41, 0x1e7d: 0x2b49, 0x1e7e: 0x8e15, 0x1e7f: 0x2b51,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0x2b59, 0x1e81: 0x2b61, 0x1e82: 0x2b69, 0x1e83: 0x2b71, 0x1e84: 0x2b79, 0x1e85: 0x2b81,
+ 0x1e86: 0x2b89, 0x1e87: 0x2b91, 0x1e88: 0x2b99, 0x1e89: 0x2ba1, 0x1e8a: 0x8e35, 0x1e8b: 0x2ba9,
+ 0x1e8c: 0x2bb1, 0x1e8d: 0x2bb9, 0x1e8e: 0x2bc1, 0x1e8f: 0x2bc9, 0x1e90: 0x2bd1, 0x1e91: 0x2bd9,
+ 0x1e92: 0x2be1, 0x1e93: 0x2be9, 0x1e94: 0x2bf1, 0x1e95: 0x2bf9, 0x1e96: 0x2c01, 0x1e97: 0x2c09,
+ 0x1e98: 0x2c11, 0x1e99: 0x2c19, 0x1e9a: 0x2c21, 0x1e9b: 0x2c29, 0x1e9c: 0x2c31, 0x1e9d: 0x8e55,
+ 0x1e9e: 0x2c39, 0x1e9f: 0x2c41, 0x1ea0: 0x2c49, 0x1ea1: 0x2c51, 0x1ea2: 0x2c59, 0x1ea3: 0x8e75,
+ 0x1ea4: 0x2c61, 0x1ea5: 0x2c69, 0x1ea6: 0x2c71, 0x1ea7: 0x2c79, 0x1ea8: 0x2c81, 0x1ea9: 0x2c89,
+ 0x1eaa: 0x2c91, 0x1eab: 0x2c99, 0x1eac: 0x7f0d, 0x1ead: 0x2ca1, 0x1eae: 0x2ca9, 0x1eaf: 0x2cb1,
+ 0x1eb0: 0x8e95, 0x1eb1: 0x2cb9, 0x1eb2: 0x2cc1, 0x1eb3: 0x2cc9, 0x1eb4: 0x2cd1, 0x1eb5: 0x2cd9,
+ 0x1eb6: 0x2ce1, 0x1eb7: 0x8eb5, 0x1eb8: 0x8ed5, 0x1eb9: 0x8ef5, 0x1eba: 0x2ce9, 0x1ebb: 0x8f15,
+ 0x1ebc: 0x2cf1, 0x1ebd: 0x2cf9, 0x1ebe: 0x2d01, 0x1ebf: 0x2d09,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0x2d11, 0x1ec1: 0x2d19, 0x1ec2: 0x2d21, 0x1ec3: 0x2d29, 0x1ec4: 0x2d31, 0x1ec5: 0x2d39,
+ 0x1ec6: 0x8f35, 0x1ec7: 0x2d41, 0x1ec8: 0x2d49, 0x1ec9: 0x2d51, 0x1eca: 0x2d59, 0x1ecb: 0x2d61,
+ 0x1ecc: 0x2d69, 0x1ecd: 0x8f55, 0x1ece: 0x2d71, 0x1ecf: 0x2d79, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95,
+ 0x1ed2: 0x2d81, 0x1ed3: 0x2d89, 0x1ed4: 0x2d91, 0x1ed5: 0x2d99, 0x1ed6: 0x2da1, 0x1ed7: 0x2da9,
+ 0x1ed8: 0x2db1, 0x1ed9: 0x2db9, 0x1eda: 0x2dc1, 0x1edb: 0x8fb5, 0x1edc: 0x2dc9, 0x1edd: 0x8fd5,
+ 0x1ede: 0x2dd1, 0x1edf: 0x2040, 0x1ee0: 0x2dd9, 0x1ee1: 0x2de1, 0x1ee2: 0x2de9, 0x1ee3: 0x8ff5,
+ 0x1ee4: 0x2df1, 0x1ee5: 0x2df9, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0x2e01, 0x1ee9: 0x2e09,
+ 0x1eea: 0x2e11, 0x1eeb: 0x2e19, 0x1eec: 0x2e21, 0x1eed: 0x2e21, 0x1eee: 0x2e29, 0x1eef: 0x2e31,
+ 0x1ef0: 0x2e39, 0x1ef1: 0x2e41, 0x1ef2: 0x2e49, 0x1ef3: 0x2e51, 0x1ef4: 0x2e59, 0x1ef5: 0x9055,
+ 0x1ef6: 0x2e61, 0x1ef7: 0x9075, 0x1ef8: 0x2e69, 0x1ef9: 0x9095, 0x1efa: 0x2e71, 0x1efb: 0x90b5,
+ 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0x2e79, 0x1eff: 0x2e81,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0x2e89, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0x2e91,
+ 0x1f06: 0x2e99, 0x1f07: 0x2e99, 0x1f08: 0x2ea1, 0x1f09: 0x2ea9, 0x1f0a: 0x2eb1, 0x1f0b: 0x2eb9,
+ 0x1f0c: 0x2ec1, 0x1f0d: 0x9195, 0x1f0e: 0x2ec9, 0x1f0f: 0x2ed1, 0x1f10: 0x2ed9, 0x1f11: 0x2ee1,
+ 0x1f12: 0x91b5, 0x1f13: 0x2ee9, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0x2ef1, 0x1f17: 0x2ef9,
+ 0x1f18: 0x2f01, 0x1f19: 0x2f09, 0x1f1a: 0x2f11, 0x1f1b: 0x2f19, 0x1f1c: 0x9215, 0x1f1d: 0x9235,
+ 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0x2f21, 0x1f21: 0x9275, 0x1f22: 0x2f29, 0x1f23: 0x2f31,
+ 0x1f24: 0x2f39, 0x1f25: 0x9295, 0x1f26: 0x2f41, 0x1f27: 0x2f49, 0x1f28: 0x2f51, 0x1f29: 0x2f59,
+ 0x1f2a: 0x2f61, 0x1f2b: 0x92b5, 0x1f2c: 0x2f69, 0x1f2d: 0x2f71, 0x1f2e: 0x2f79, 0x1f2f: 0x2f81,
+ 0x1f30: 0x2f89, 0x1f31: 0x2f91, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0x2f99, 0x1f35: 0x9315,
+ 0x1f36: 0x2fa1, 0x1f37: 0x9335, 0x1f38: 0x2fa9, 0x1f39: 0x2fb1, 0x1f3a: 0x2fb9, 0x1f3b: 0x9355,
+ 0x1f3c: 0x9375, 0x1f3d: 0x2fc1, 0x1f3e: 0x9395, 0x1f3f: 0x2fc9,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0x93b5, 0x1f41: 0x2fd1, 0x1f42: 0x2fd9, 0x1f43: 0x2fe1, 0x1f44: 0x2fe9, 0x1f45: 0x2ff1,
+ 0x1f46: 0x2ff9, 0x1f47: 0x93d5, 0x1f48: 0x93f5, 0x1f49: 0x9415, 0x1f4a: 0x9435, 0x1f4b: 0x2a29,
+ 0x1f4c: 0x3001, 0x1f4d: 0x3009, 0x1f4e: 0x3011, 0x1f4f: 0x3019, 0x1f50: 0x3021, 0x1f51: 0x3029,
+ 0x1f52: 0x3031, 0x1f53: 0x3039, 0x1f54: 0x3041, 0x1f55: 0x3049, 0x1f56: 0x3051, 0x1f57: 0x9455,
+ 0x1f58: 0x3059, 0x1f59: 0x3061, 0x1f5a: 0x3069, 0x1f5b: 0x3071, 0x1f5c: 0x3079, 0x1f5d: 0x3081,
+ 0x1f5e: 0x3089, 0x1f5f: 0x3091, 0x1f60: 0x3099, 0x1f61: 0x30a1, 0x1f62: 0x30a9, 0x1f63: 0x30b1,
+ 0x1f64: 0x9475, 0x1f65: 0x9495, 0x1f66: 0x94b5, 0x1f67: 0x30b9, 0x1f68: 0x30c1, 0x1f69: 0x30c9,
+ 0x1f6a: 0x30d1, 0x1f6b: 0x94d5, 0x1f6c: 0x30d9, 0x1f6d: 0x94f5, 0x1f6e: 0x30e1, 0x1f6f: 0x30e9,
+ 0x1f70: 0x9515, 0x1f71: 0x9535, 0x1f72: 0x30f1, 0x1f73: 0x30f9, 0x1f74: 0x3101, 0x1f75: 0x3109,
+ 0x1f76: 0x3111, 0x1f77: 0x3119, 0x1f78: 0x3121, 0x1f79: 0x3129, 0x1f7a: 0x3131, 0x1f7b: 0x3139,
+ 0x1f7c: 0x3141, 0x1f7d: 0x3149, 0x1f7e: 0x3151, 0x1f7f: 0x2040,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0x3159, 0x1f81: 0x3161, 0x1f82: 0x3169, 0x1f83: 0x3171, 0x1f84: 0x3179, 0x1f85: 0x9555,
+ 0x1f86: 0x3181, 0x1f87: 0x3189, 0x1f88: 0x3191, 0x1f89: 0x3199, 0x1f8a: 0x31a1, 0x1f8b: 0x9575,
+ 0x1f8c: 0x9595, 0x1f8d: 0x31a9, 0x1f8e: 0x31b1, 0x1f8f: 0x31b9, 0x1f90: 0x31c1, 0x1f91: 0x31c9,
+ 0x1f92: 0x31d1, 0x1f93: 0x95b5, 0x1f94: 0x31d9, 0x1f95: 0x31e1, 0x1f96: 0x31e9, 0x1f97: 0x31f1,
+ 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0x31f9, 0x1f9b: 0x3201, 0x1f9c: 0x3209, 0x1f9d: 0x9615,
+ 0x1f9e: 0x3211, 0x1f9f: 0x3219, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0x3221, 0x1fa3: 0x3229,
+ 0x1fa4: 0x3231, 0x1fa5: 0x9655, 0x1fa6: 0x3239, 0x1fa7: 0x3241, 0x1fa8: 0x3249, 0x1fa9: 0x3251,
+ 0x1faa: 0x3259, 0x1fab: 0x3261, 0x1fac: 0x3269, 0x1fad: 0x9675, 0x1fae: 0x3271, 0x1faf: 0x3279,
+ 0x1fb0: 0x3281, 0x1fb1: 0x9695, 0x1fb2: 0x3289, 0x1fb3: 0x3291, 0x1fb4: 0x3299, 0x1fb5: 0x32a1,
+ 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0x32a9, 0x1fb9: 0x32b1, 0x1fba: 0x32b9, 0x1fbb: 0x96d5,
+ 0x1fbc: 0x32c1, 0x1fbd: 0x96f5, 0x1fbe: 0x32c9, 0x1fbf: 0x32c9,
+ // Block 0x7f, offset 0x1fc0
+ 0x1fc0: 0x32d1, 0x1fc1: 0x9715, 0x1fc2: 0x32d9, 0x1fc3: 0x32e1, 0x1fc4: 0x32e9, 0x1fc5: 0x32f1,
+ 0x1fc6: 0x32f9, 0x1fc7: 0x3301, 0x1fc8: 0x3309, 0x1fc9: 0x9735, 0x1fca: 0x3311, 0x1fcb: 0x3319,
+ 0x1fcc: 0x3321, 0x1fcd: 0x3329, 0x1fce: 0x3331, 0x1fcf: 0x3339, 0x1fd0: 0x9755, 0x1fd1: 0x3341,
+ 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0x3349, 0x1fd6: 0x3351, 0x1fd7: 0x3359,
+ 0x1fd8: 0x3361, 0x1fd9: 0x3369, 0x1fda: 0x3371, 0x1fdb: 0x3379, 0x1fdc: 0x3381, 0x1fdd: 0x97d5,
+ 0x1fde: 0x0040, 0x1fdf: 0x0040, 0x1fe0: 0x0040, 0x1fe1: 0x0040, 0x1fe2: 0x0040, 0x1fe3: 0x0040,
+ 0x1fe4: 0x0040, 0x1fe5: 0x0040, 0x1fe6: 0x0040, 0x1fe7: 0x0040, 0x1fe8: 0x0040, 0x1fe9: 0x0040,
+ 0x1fea: 0x0040, 0x1feb: 0x0040, 0x1fec: 0x0040, 0x1fed: 0x0040, 0x1fee: 0x0040, 0x1fef: 0x0040,
+ 0x1ff0: 0x0040, 0x1ff1: 0x0040, 0x1ff2: 0x0040, 0x1ff3: 0x0040, 0x1ff4: 0x0040, 0x1ff5: 0x0040,
+ 0x1ff6: 0x0040, 0x1ff7: 0x0040, 0x1ff8: 0x0040, 0x1ff9: 0x0040, 0x1ffa: 0x0040, 0x1ffb: 0x0040,
+ 0x1ffc: 0x0040, 0x1ffd: 0x0040, 0x1ffe: 0x0040, 0x1fff: 0x0040,
+}
+
+// idnaIndex: 37 blocks, 2368 entries, 4736 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2368]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7e, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7f, 0xca: 0x80, 0xcb: 0x07, 0xcc: 0x81, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x82, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x83, 0xd6: 0x84, 0xd7: 0x85,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x86, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x87, 0xde: 0x88, 0xdf: 0x89,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1e, 0xf1: 0x1f, 0xf2: 0x1f, 0xf3: 0x21, 0xf4: 0x22,
+ // Block 0x4, offset 0x100
+ 0x120: 0x8a, 0x121: 0x13, 0x122: 0x8b, 0x123: 0x8c, 0x124: 0x8d, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8e,
+ 0x130: 0x8f, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x90, 0x135: 0x21, 0x136: 0x91, 0x137: 0x92,
+ 0x138: 0x93, 0x139: 0x94, 0x13a: 0x22, 0x13b: 0x95, 0x13c: 0x96, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x97,
+ // Block 0x5, offset 0x140
+ 0x140: 0x98, 0x141: 0x99, 0x142: 0x9a, 0x143: 0x9b, 0x144: 0x9c, 0x145: 0x9d, 0x146: 0x9e, 0x147: 0x9f,
+ 0x148: 0xa0, 0x149: 0xa1, 0x14a: 0xa2, 0x14b: 0xa3, 0x14c: 0xa4, 0x14d: 0xa5, 0x14e: 0xa6, 0x14f: 0xa7,
+ 0x150: 0xa8, 0x151: 0xa0, 0x152: 0xa0, 0x153: 0xa0, 0x154: 0xa0, 0x155: 0xa0, 0x156: 0xa0, 0x157: 0xa0,
+ 0x158: 0xa0, 0x159: 0xa9, 0x15a: 0xaa, 0x15b: 0xab, 0x15c: 0xac, 0x15d: 0xad, 0x15e: 0xae, 0x15f: 0xaf,
+ 0x160: 0xb0, 0x161: 0xb1, 0x162: 0xb2, 0x163: 0xb3, 0x164: 0xb4, 0x165: 0xb5, 0x166: 0xb6, 0x167: 0xb7,
+ 0x168: 0xb8, 0x169: 0xb9, 0x16a: 0xba, 0x16b: 0xbb, 0x16c: 0xbc, 0x16d: 0xbd, 0x16e: 0xbe, 0x16f: 0xbf,
+ 0x170: 0xc0, 0x171: 0xc1, 0x172: 0xc2, 0x173: 0xc3, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc4,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc5, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc6, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc7, 0x187: 0x9c,
+ 0x188: 0xc8, 0x189: 0xc9, 0x18a: 0x9c, 0x18b: 0x9c, 0x18c: 0xca, 0x18d: 0x9c, 0x18e: 0x9c, 0x18f: 0x9c,
+ 0x190: 0xcb, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9c, 0x195: 0x9c, 0x196: 0x9c, 0x197: 0x9c,
+ 0x198: 0x9c, 0x199: 0x9c, 0x19a: 0x9c, 0x19b: 0x9c, 0x19c: 0x9c, 0x19d: 0x9c, 0x19e: 0x9c, 0x19f: 0x9c,
+ 0x1a0: 0x9c, 0x1a1: 0x9c, 0x1a2: 0x9c, 0x1a3: 0x9c, 0x1a4: 0x9c, 0x1a5: 0x9c, 0x1a6: 0x9c, 0x1a7: 0x9c,
+ 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9c, 0x1ab: 0xce, 0x1ac: 0x9c, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0x9c,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0xe3, 0x1cd: 0xe4, 0x1ce: 0x3e, 0x1cf: 0x3f,
+ 0x1d0: 0xa0, 0x1d1: 0xa0, 0x1d2: 0xa0, 0x1d3: 0xa0, 0x1d4: 0xa0, 0x1d5: 0xa0, 0x1d6: 0xa0, 0x1d7: 0xa0,
+ 0x1d8: 0xa0, 0x1d9: 0xa0, 0x1da: 0xa0, 0x1db: 0xa0, 0x1dc: 0xa0, 0x1dd: 0xa0, 0x1de: 0xa0, 0x1df: 0xa0,
+ 0x1e0: 0xa0, 0x1e1: 0xa0, 0x1e2: 0xa0, 0x1e3: 0xa0, 0x1e4: 0xa0, 0x1e5: 0xa0, 0x1e6: 0xa0, 0x1e7: 0xa0,
+ 0x1e8: 0xa0, 0x1e9: 0xa0, 0x1ea: 0xa0, 0x1eb: 0xa0, 0x1ec: 0xa0, 0x1ed: 0xa0, 0x1ee: 0xa0, 0x1ef: 0xa0,
+ 0x1f0: 0xa0, 0x1f1: 0xa0, 0x1f2: 0xa0, 0x1f3: 0xa0, 0x1f4: 0xa0, 0x1f5: 0xa0, 0x1f6: 0xa0, 0x1f7: 0xa0,
+ 0x1f8: 0xa0, 0x1f9: 0xa0, 0x1fa: 0xa0, 0x1fb: 0xa0, 0x1fc: 0xa0, 0x1fd: 0xa0, 0x1fe: 0xa0, 0x1ff: 0xa0,
+ // Block 0x8, offset 0x200
+ 0x200: 0xa0, 0x201: 0xa0, 0x202: 0xa0, 0x203: 0xa0, 0x204: 0xa0, 0x205: 0xa0, 0x206: 0xa0, 0x207: 0xa0,
+ 0x208: 0xa0, 0x209: 0xa0, 0x20a: 0xa0, 0x20b: 0xa0, 0x20c: 0xa0, 0x20d: 0xa0, 0x20e: 0xa0, 0x20f: 0xa0,
+ 0x210: 0xa0, 0x211: 0xa0, 0x212: 0xa0, 0x213: 0xa0, 0x214: 0xa0, 0x215: 0xa0, 0x216: 0xa0, 0x217: 0xa0,
+ 0x218: 0xa0, 0x219: 0xa0, 0x21a: 0xa0, 0x21b: 0xa0, 0x21c: 0xa0, 0x21d: 0xa0, 0x21e: 0xa0, 0x21f: 0xa0,
+ 0x220: 0xa0, 0x221: 0xa0, 0x222: 0xa0, 0x223: 0xa0, 0x224: 0xa0, 0x225: 0xa0, 0x226: 0xa0, 0x227: 0xa0,
+ 0x228: 0xa0, 0x229: 0xa0, 0x22a: 0xa0, 0x22b: 0xa0, 0x22c: 0xa0, 0x22d: 0xa0, 0x22e: 0xa0, 0x22f: 0xa0,
+ 0x230: 0xa0, 0x231: 0xa0, 0x232: 0xa0, 0x233: 0xa0, 0x234: 0xa0, 0x235: 0xa0, 0x236: 0xa0, 0x237: 0x9c,
+ 0x238: 0xa0, 0x239: 0xa0, 0x23a: 0xa0, 0x23b: 0xa0, 0x23c: 0xa0, 0x23d: 0xa0, 0x23e: 0xa0, 0x23f: 0xa0,
+ // Block 0x9, offset 0x240
+ 0x240: 0xa0, 0x241: 0xa0, 0x242: 0xa0, 0x243: 0xa0, 0x244: 0xa0, 0x245: 0xa0, 0x246: 0xa0, 0x247: 0xa0,
+ 0x248: 0xa0, 0x249: 0xa0, 0x24a: 0xa0, 0x24b: 0xa0, 0x24c: 0xa0, 0x24d: 0xa0, 0x24e: 0xa0, 0x24f: 0xa0,
+ 0x250: 0xa0, 0x251: 0xa0, 0x252: 0xa0, 0x253: 0xa0, 0x254: 0xa0, 0x255: 0xa0, 0x256: 0xa0, 0x257: 0xa0,
+ 0x258: 0xa0, 0x259: 0xa0, 0x25a: 0xa0, 0x25b: 0xa0, 0x25c: 0xa0, 0x25d: 0xa0, 0x25e: 0xa0, 0x25f: 0xa0,
+ 0x260: 0xa0, 0x261: 0xa0, 0x262: 0xa0, 0x263: 0xa0, 0x264: 0xa0, 0x265: 0xa0, 0x266: 0xa0, 0x267: 0xa0,
+ 0x268: 0xa0, 0x269: 0xa0, 0x26a: 0xa0, 0x26b: 0xa0, 0x26c: 0xa0, 0x26d: 0xa0, 0x26e: 0xa0, 0x26f: 0xa0,
+ 0x270: 0xa0, 0x271: 0xa0, 0x272: 0xa0, 0x273: 0xa0, 0x274: 0xa0, 0x275: 0xa0, 0x276: 0xa0, 0x277: 0xa0,
+ 0x278: 0xa0, 0x279: 0xa0, 0x27a: 0xa0, 0x27b: 0xa0, 0x27c: 0xa0, 0x27d: 0xa0, 0x27e: 0xa0, 0x27f: 0xa0,
+ // Block 0xa, offset 0x280
+ 0x280: 0xa0, 0x281: 0xa0, 0x282: 0xa0, 0x283: 0xa0, 0x284: 0xa0, 0x285: 0xa0, 0x286: 0xa0, 0x287: 0xa0,
+ 0x288: 0xa0, 0x289: 0xa0, 0x28a: 0xa0, 0x28b: 0xa0, 0x28c: 0xa0, 0x28d: 0xa0, 0x28e: 0xa0, 0x28f: 0xa0,
+ 0x290: 0xa0, 0x291: 0xa0, 0x292: 0xa0, 0x293: 0xa0, 0x294: 0xa0, 0x295: 0xa0, 0x296: 0xa0, 0x297: 0xa0,
+ 0x298: 0xa0, 0x299: 0xa0, 0x29a: 0xa0, 0x29b: 0xa0, 0x29c: 0xa0, 0x29d: 0xa0, 0x29e: 0xa0, 0x29f: 0xa0,
+ 0x2a0: 0xa0, 0x2a1: 0xa0, 0x2a2: 0xa0, 0x2a3: 0xa0, 0x2a4: 0xa0, 0x2a5: 0xa0, 0x2a6: 0xa0, 0x2a7: 0xa0,
+ 0x2a8: 0xa0, 0x2a9: 0xa0, 0x2aa: 0xa0, 0x2ab: 0xa0, 0x2ac: 0xa0, 0x2ad: 0xa0, 0x2ae: 0xa0, 0x2af: 0xa0,
+ 0x2b0: 0xa0, 0x2b1: 0xa0, 0x2b2: 0xa0, 0x2b3: 0xa0, 0x2b4: 0xa0, 0x2b5: 0xa0, 0x2b6: 0xa0, 0x2b7: 0xa0,
+ 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe5,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0xa0, 0x2c1: 0xa0, 0x2c2: 0xa0, 0x2c3: 0xa0, 0x2c4: 0xa0, 0x2c5: 0xa0, 0x2c6: 0xa0, 0x2c7: 0xa0,
+ 0x2c8: 0xa0, 0x2c9: 0xa0, 0x2ca: 0xa0, 0x2cb: 0xa0, 0x2cc: 0xa0, 0x2cd: 0xa0, 0x2ce: 0xa0, 0x2cf: 0xa0,
+ 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe6, 0x2d3: 0xe7, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0,
+ 0x2d8: 0xe8, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe9, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xea,
+ 0x2e0: 0xeb, 0x2e1: 0xec, 0x2e2: 0xed, 0x2e3: 0xee, 0x2e4: 0xef, 0x2e5: 0xf0, 0x2e6: 0xf1, 0x2e7: 0xf2,
+ 0x2e8: 0xf3, 0x2e9: 0xf4, 0x2ea: 0xf5, 0x2eb: 0xf6, 0x2ec: 0xf7, 0x2ed: 0xf8, 0x2ee: 0xf9, 0x2ef: 0xfa,
+ 0x2f0: 0xa0, 0x2f1: 0xa0, 0x2f2: 0xa0, 0x2f3: 0xa0, 0x2f4: 0xa0, 0x2f5: 0xa0, 0x2f6: 0xa0, 0x2f7: 0xa0,
+ 0x2f8: 0xa0, 0x2f9: 0xa0, 0x2fa: 0xa0, 0x2fb: 0xa0, 0x2fc: 0xa0, 0x2fd: 0xa0, 0x2fe: 0xa0, 0x2ff: 0xa0,
+ // Block 0xc, offset 0x300
+ 0x300: 0xa0, 0x301: 0xa0, 0x302: 0xa0, 0x303: 0xa0, 0x304: 0xa0, 0x305: 0xa0, 0x306: 0xa0, 0x307: 0xa0,
+ 0x308: 0xa0, 0x309: 0xa0, 0x30a: 0xa0, 0x30b: 0xa0, 0x30c: 0xa0, 0x30d: 0xa0, 0x30e: 0xa0, 0x30f: 0xa0,
+ 0x310: 0xa0, 0x311: 0xa0, 0x312: 0xa0, 0x313: 0xa0, 0x314: 0xa0, 0x315: 0xa0, 0x316: 0xa0, 0x317: 0xa0,
+ 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xfb, 0x31f: 0xfc,
+ // Block 0xd, offset 0x340
+ 0x340: 0xfd, 0x341: 0xfd, 0x342: 0xfd, 0x343: 0xfd, 0x344: 0xfd, 0x345: 0xfd, 0x346: 0xfd, 0x347: 0xfd,
+ 0x348: 0xfd, 0x349: 0xfd, 0x34a: 0xfd, 0x34b: 0xfd, 0x34c: 0xfd, 0x34d: 0xfd, 0x34e: 0xfd, 0x34f: 0xfd,
+ 0x350: 0xfd, 0x351: 0xfd, 0x352: 0xfd, 0x353: 0xfd, 0x354: 0xfd, 0x355: 0xfd, 0x356: 0xfd, 0x357: 0xfd,
+ 0x358: 0xfd, 0x359: 0xfd, 0x35a: 0xfd, 0x35b: 0xfd, 0x35c: 0xfd, 0x35d: 0xfd, 0x35e: 0xfd, 0x35f: 0xfd,
+ 0x360: 0xfd, 0x361: 0xfd, 0x362: 0xfd, 0x363: 0xfd, 0x364: 0xfd, 0x365: 0xfd, 0x366: 0xfd, 0x367: 0xfd,
+ 0x368: 0xfd, 0x369: 0xfd, 0x36a: 0xfd, 0x36b: 0xfd, 0x36c: 0xfd, 0x36d: 0xfd, 0x36e: 0xfd, 0x36f: 0xfd,
+ 0x370: 0xfd, 0x371: 0xfd, 0x372: 0xfd, 0x373: 0xfd, 0x374: 0xfd, 0x375: 0xfd, 0x376: 0xfd, 0x377: 0xfd,
+ 0x378: 0xfd, 0x379: 0xfd, 0x37a: 0xfd, 0x37b: 0xfd, 0x37c: 0xfd, 0x37d: 0xfd, 0x37e: 0xfd, 0x37f: 0xfd,
+ // Block 0xe, offset 0x380
+ 0x380: 0xfd, 0x381: 0xfd, 0x382: 0xfd, 0x383: 0xfd, 0x384: 0xfd, 0x385: 0xfd, 0x386: 0xfd, 0x387: 0xfd,
+ 0x388: 0xfd, 0x389: 0xfd, 0x38a: 0xfd, 0x38b: 0xfd, 0x38c: 0xfd, 0x38d: 0xfd, 0x38e: 0xfd, 0x38f: 0xfd,
+ 0x390: 0xfd, 0x391: 0xfd, 0x392: 0xfd, 0x393: 0xfd, 0x394: 0xfd, 0x395: 0xfd, 0x396: 0xfd, 0x397: 0xfd,
+ 0x398: 0xfd, 0x399: 0xfd, 0x39a: 0xfd, 0x39b: 0xfd, 0x39c: 0xfd, 0x39d: 0xfd, 0x39e: 0xfd, 0x39f: 0xfd,
+ 0x3a0: 0xfd, 0x3a1: 0xfd, 0x3a2: 0xfd, 0x3a3: 0xfd, 0x3a4: 0xfe, 0x3a5: 0xff, 0x3a6: 0x100, 0x3a7: 0x101,
+ 0x3a8: 0x45, 0x3a9: 0x102, 0x3aa: 0x103, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a,
+ 0x3b0: 0x104, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x105, 0x3b7: 0x50,
+ 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x106, 0x3c1: 0x107, 0x3c2: 0xa0, 0x3c3: 0x108, 0x3c4: 0x109, 0x3c5: 0x9c, 0x3c6: 0x10a, 0x3c7: 0x10b,
+ 0x3c8: 0xfd, 0x3c9: 0xfd, 0x3ca: 0x10c, 0x3cb: 0x10d, 0x3cc: 0x10e, 0x3cd: 0x10f, 0x3ce: 0x110, 0x3cf: 0x111,
+ 0x3d0: 0x112, 0x3d1: 0xa0, 0x3d2: 0x113, 0x3d3: 0x114, 0x3d4: 0x115, 0x3d5: 0x116, 0x3d6: 0xfd, 0x3d7: 0xfd,
+ 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x117, 0x3dd: 0x118, 0x3de: 0xfd, 0x3df: 0xfd,
+ 0x3e0: 0x119, 0x3e1: 0x11a, 0x3e2: 0x11b, 0x3e3: 0x11c, 0x3e4: 0x11d, 0x3e5: 0xfd, 0x3e6: 0x11e, 0x3e7: 0x11f,
+ 0x3e8: 0x120, 0x3e9: 0x121, 0x3ea: 0x122, 0x3eb: 0x59, 0x3ec: 0x123, 0x3ed: 0x124, 0x3ee: 0x5a, 0x3ef: 0xfd,
+ 0x3f0: 0x125, 0x3f1: 0x126, 0x3f2: 0x127, 0x3f3: 0x128, 0x3f4: 0x129, 0x3f5: 0xfd, 0x3f6: 0xfd, 0x3f7: 0xfd,
+ 0x3f8: 0xfd, 0x3f9: 0x12a, 0x3fa: 0x12b, 0x3fb: 0xfd, 0x3fc: 0x12c, 0x3fd: 0x12d, 0x3fe: 0x12e, 0x3ff: 0x12f,
+ // Block 0x10, offset 0x400
+ 0x400: 0x130, 0x401: 0x131, 0x402: 0x132, 0x403: 0x133, 0x404: 0x134, 0x405: 0x135, 0x406: 0x136, 0x407: 0x137,
+ 0x408: 0x138, 0x409: 0xfd, 0x40a: 0x139, 0x40b: 0x13a, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xfd, 0x40f: 0xfd,
+ 0x410: 0x13b, 0x411: 0x13c, 0x412: 0x13d, 0x413: 0x13e, 0x414: 0xfd, 0x415: 0xfd, 0x416: 0x13f, 0x417: 0x140,
+ 0x418: 0x141, 0x419: 0x142, 0x41a: 0x143, 0x41b: 0x144, 0x41c: 0x145, 0x41d: 0xfd, 0x41e: 0xfd, 0x41f: 0xfd,
+ 0x420: 0x146, 0x421: 0xfd, 0x422: 0x147, 0x423: 0x148, 0x424: 0x5d, 0x425: 0x149, 0x426: 0x14a, 0x427: 0x14b,
+ 0x428: 0x14c, 0x429: 0x14d, 0x42a: 0x14e, 0x42b: 0x14f, 0x42c: 0xfd, 0x42d: 0xfd, 0x42e: 0xfd, 0x42f: 0xfd,
+ 0x430: 0x150, 0x431: 0x151, 0x432: 0x152, 0x433: 0xfd, 0x434: 0x153, 0x435: 0x154, 0x436: 0x155, 0x437: 0xfd,
+ 0x438: 0xfd, 0x439: 0xfd, 0x43a: 0xfd, 0x43b: 0x156, 0x43c: 0xfd, 0x43d: 0xfd, 0x43e: 0x157, 0x43f: 0x158,
+ // Block 0x11, offset 0x440
+ 0x440: 0xa0, 0x441: 0xa0, 0x442: 0xa0, 0x443: 0xa0, 0x444: 0xa0, 0x445: 0xa0, 0x446: 0xa0, 0x447: 0xa0,
+ 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x159, 0x44f: 0xfd,
+ 0x450: 0x9c, 0x451: 0x15a, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x15b, 0x456: 0xfd, 0x457: 0xfd,
+ 0x458: 0xfd, 0x459: 0xfd, 0x45a: 0xfd, 0x45b: 0xfd, 0x45c: 0xfd, 0x45d: 0xfd, 0x45e: 0xfd, 0x45f: 0xfd,
+ 0x460: 0xfd, 0x461: 0xfd, 0x462: 0xfd, 0x463: 0xfd, 0x464: 0xfd, 0x465: 0xfd, 0x466: 0xfd, 0x467: 0xfd,
+ 0x468: 0xfd, 0x469: 0xfd, 0x46a: 0xfd, 0x46b: 0xfd, 0x46c: 0xfd, 0x46d: 0xfd, 0x46e: 0xfd, 0x46f: 0xfd,
+ 0x470: 0xfd, 0x471: 0xfd, 0x472: 0xfd, 0x473: 0xfd, 0x474: 0xfd, 0x475: 0xfd, 0x476: 0xfd, 0x477: 0xfd,
+ 0x478: 0xfd, 0x479: 0xfd, 0x47a: 0xfd, 0x47b: 0xfd, 0x47c: 0xfd, 0x47d: 0xfd, 0x47e: 0xfd, 0x47f: 0xfd,
+ // Block 0x12, offset 0x480
+ 0x480: 0xa0, 0x481: 0xa0, 0x482: 0xa0, 0x483: 0xa0, 0x484: 0xa0, 0x485: 0xa0, 0x486: 0xa0, 0x487: 0xa0,
+ 0x488: 0xa0, 0x489: 0xa0, 0x48a: 0xa0, 0x48b: 0xa0, 0x48c: 0xa0, 0x48d: 0xa0, 0x48e: 0xa0, 0x48f: 0xa0,
+ 0x490: 0x15c, 0x491: 0xfd, 0x492: 0xfd, 0x493: 0xfd, 0x494: 0xfd, 0x495: 0xfd, 0x496: 0xfd, 0x497: 0xfd,
+ 0x498: 0xfd, 0x499: 0xfd, 0x49a: 0xfd, 0x49b: 0xfd, 0x49c: 0xfd, 0x49d: 0xfd, 0x49e: 0xfd, 0x49f: 0xfd,
+ 0x4a0: 0xfd, 0x4a1: 0xfd, 0x4a2: 0xfd, 0x4a3: 0xfd, 0x4a4: 0xfd, 0x4a5: 0xfd, 0x4a6: 0xfd, 0x4a7: 0xfd,
+ 0x4a8: 0xfd, 0x4a9: 0xfd, 0x4aa: 0xfd, 0x4ab: 0xfd, 0x4ac: 0xfd, 0x4ad: 0xfd, 0x4ae: 0xfd, 0x4af: 0xfd,
+ 0x4b0: 0xfd, 0x4b1: 0xfd, 0x4b2: 0xfd, 0x4b3: 0xfd, 0x4b4: 0xfd, 0x4b5: 0xfd, 0x4b6: 0xfd, 0x4b7: 0xfd,
+ 0x4b8: 0xfd, 0x4b9: 0xfd, 0x4ba: 0xfd, 0x4bb: 0xfd, 0x4bc: 0xfd, 0x4bd: 0xfd, 0x4be: 0xfd, 0x4bf: 0xfd,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xfd, 0x4c1: 0xfd, 0x4c2: 0xfd, 0x4c3: 0xfd, 0x4c4: 0xfd, 0x4c5: 0xfd, 0x4c6: 0xfd, 0x4c7: 0xfd,
+ 0x4c8: 0xfd, 0x4c9: 0xfd, 0x4ca: 0xfd, 0x4cb: 0xfd, 0x4cc: 0xfd, 0x4cd: 0xfd, 0x4ce: 0xfd, 0x4cf: 0xfd,
+ 0x4d0: 0xa0, 0x4d1: 0xa0, 0x4d2: 0xa0, 0x4d3: 0xa0, 0x4d4: 0xa0, 0x4d5: 0xa0, 0x4d6: 0xa0, 0x4d7: 0xa0,
+ 0x4d8: 0xa0, 0x4d9: 0x15d, 0x4da: 0xfd, 0x4db: 0xfd, 0x4dc: 0xfd, 0x4dd: 0xfd, 0x4de: 0xfd, 0x4df: 0xfd,
+ 0x4e0: 0xfd, 0x4e1: 0xfd, 0x4e2: 0xfd, 0x4e3: 0xfd, 0x4e4: 0xfd, 0x4e5: 0xfd, 0x4e6: 0xfd, 0x4e7: 0xfd,
+ 0x4e8: 0xfd, 0x4e9: 0xfd, 0x4ea: 0xfd, 0x4eb: 0xfd, 0x4ec: 0xfd, 0x4ed: 0xfd, 0x4ee: 0xfd, 0x4ef: 0xfd,
+ 0x4f0: 0xfd, 0x4f1: 0xfd, 0x4f2: 0xfd, 0x4f3: 0xfd, 0x4f4: 0xfd, 0x4f5: 0xfd, 0x4f6: 0xfd, 0x4f7: 0xfd,
+ 0x4f8: 0xfd, 0x4f9: 0xfd, 0x4fa: 0xfd, 0x4fb: 0xfd, 0x4fc: 0xfd, 0x4fd: 0xfd, 0x4fe: 0xfd, 0x4ff: 0xfd,
+ // Block 0x14, offset 0x500
+ 0x500: 0xfd, 0x501: 0xfd, 0x502: 0xfd, 0x503: 0xfd, 0x504: 0xfd, 0x505: 0xfd, 0x506: 0xfd, 0x507: 0xfd,
+ 0x508: 0xfd, 0x509: 0xfd, 0x50a: 0xfd, 0x50b: 0xfd, 0x50c: 0xfd, 0x50d: 0xfd, 0x50e: 0xfd, 0x50f: 0xfd,
+ 0x510: 0xfd, 0x511: 0xfd, 0x512: 0xfd, 0x513: 0xfd, 0x514: 0xfd, 0x515: 0xfd, 0x516: 0xfd, 0x517: 0xfd,
+ 0x518: 0xfd, 0x519: 0xfd, 0x51a: 0xfd, 0x51b: 0xfd, 0x51c: 0xfd, 0x51d: 0xfd, 0x51e: 0xfd, 0x51f: 0xfd,
+ 0x520: 0xa0, 0x521: 0xa0, 0x522: 0xa0, 0x523: 0xa0, 0x524: 0xa0, 0x525: 0xa0, 0x526: 0xa0, 0x527: 0xa0,
+ 0x528: 0x14f, 0x529: 0x15e, 0x52a: 0xfd, 0x52b: 0x15f, 0x52c: 0x160, 0x52d: 0x161, 0x52e: 0x162, 0x52f: 0xfd,
+ 0x530: 0xfd, 0x531: 0xfd, 0x532: 0xfd, 0x533: 0xfd, 0x534: 0xfd, 0x535: 0xfd, 0x536: 0xfd, 0x537: 0xfd,
+ 0x538: 0xfd, 0x539: 0x163, 0x53a: 0x164, 0x53b: 0xfd, 0x53c: 0xa0, 0x53d: 0x165, 0x53e: 0x166, 0x53f: 0x167,
+ // Block 0x15, offset 0x540
+ 0x540: 0xa0, 0x541: 0xa0, 0x542: 0xa0, 0x543: 0xa0, 0x544: 0xa0, 0x545: 0xa0, 0x546: 0xa0, 0x547: 0xa0,
+ 0x548: 0xa0, 0x549: 0xa0, 0x54a: 0xa0, 0x54b: 0xa0, 0x54c: 0xa0, 0x54d: 0xa0, 0x54e: 0xa0, 0x54f: 0xa0,
+ 0x550: 0xa0, 0x551: 0xa0, 0x552: 0xa0, 0x553: 0xa0, 0x554: 0xa0, 0x555: 0xa0, 0x556: 0xa0, 0x557: 0xa0,
+ 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x168,
+ 0x560: 0xa0, 0x561: 0xa0, 0x562: 0xa0, 0x563: 0xa0, 0x564: 0xa0, 0x565: 0xa0, 0x566: 0xa0, 0x567: 0xa0,
+ 0x568: 0xa0, 0x569: 0xa0, 0x56a: 0xa0, 0x56b: 0xa0, 0x56c: 0xa0, 0x56d: 0xa0, 0x56e: 0xa0, 0x56f: 0xa0,
+ 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x169, 0x574: 0x16a, 0x575: 0xfd, 0x576: 0xfd, 0x577: 0xfd,
+ 0x578: 0xfd, 0x579: 0xfd, 0x57a: 0xfd, 0x57b: 0xfd, 0x57c: 0xfd, 0x57d: 0xfd, 0x57e: 0xfd, 0x57f: 0xfd,
+ // Block 0x16, offset 0x580
+ 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x16b, 0x585: 0x16c, 0x586: 0xa0, 0x587: 0xa0,
+ 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16d, 0x58c: 0xfd, 0x58d: 0xfd, 0x58e: 0xfd, 0x58f: 0xfd,
+ 0x590: 0xfd, 0x591: 0xfd, 0x592: 0xfd, 0x593: 0xfd, 0x594: 0xfd, 0x595: 0xfd, 0x596: 0xfd, 0x597: 0xfd,
+ 0x598: 0xfd, 0x599: 0xfd, 0x59a: 0xfd, 0x59b: 0xfd, 0x59c: 0xfd, 0x59d: 0xfd, 0x59e: 0xfd, 0x59f: 0xfd,
+ 0x5a0: 0xfd, 0x5a1: 0xfd, 0x5a2: 0xfd, 0x5a3: 0xfd, 0x5a4: 0xfd, 0x5a5: 0xfd, 0x5a6: 0xfd, 0x5a7: 0xfd,
+ 0x5a8: 0xfd, 0x5a9: 0xfd, 0x5aa: 0xfd, 0x5ab: 0xfd, 0x5ac: 0xfd, 0x5ad: 0xfd, 0x5ae: 0xfd, 0x5af: 0xfd,
+ 0x5b0: 0xa0, 0x5b1: 0x16e, 0x5b2: 0x16f, 0x5b3: 0xfd, 0x5b4: 0xfd, 0x5b5: 0xfd, 0x5b6: 0xfd, 0x5b7: 0xfd,
+ 0x5b8: 0xfd, 0x5b9: 0xfd, 0x5ba: 0xfd, 0x5bb: 0xfd, 0x5bc: 0xfd, 0x5bd: 0xfd, 0x5be: 0xfd, 0x5bf: 0xfd,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x170, 0x5c4: 0x171, 0x5c5: 0x172, 0x5c6: 0x173, 0x5c7: 0x174,
+ 0x5c8: 0x9c, 0x5c9: 0x175, 0x5ca: 0xfd, 0x5cb: 0x176, 0x5cc: 0x9c, 0x5cd: 0x177, 0x5ce: 0xfd, 0x5cf: 0xfd,
+ 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65,
+ 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d,
+ 0x5e0: 0x9c, 0x5e1: 0x9c, 0x5e2: 0x9c, 0x5e3: 0x9c, 0x5e4: 0x9c, 0x5e5: 0x9c, 0x5e6: 0x9c, 0x5e7: 0x9c,
+ 0x5e8: 0x178, 0x5e9: 0x179, 0x5ea: 0x17a, 0x5eb: 0xfd, 0x5ec: 0xfd, 0x5ed: 0xfd, 0x5ee: 0xfd, 0x5ef: 0xfd,
+ 0x5f0: 0xfd, 0x5f1: 0xfd, 0x5f2: 0xfd, 0x5f3: 0xfd, 0x5f4: 0xfd, 0x5f5: 0xfd, 0x5f6: 0xfd, 0x5f7: 0xfd,
+ 0x5f8: 0xfd, 0x5f9: 0xfd, 0x5fa: 0xfd, 0x5fb: 0xfd, 0x5fc: 0xfd, 0x5fd: 0xfd, 0x5fe: 0xfd, 0x5ff: 0xfd,
+ // Block 0x18, offset 0x600
+ 0x600: 0x17b, 0x601: 0xfd, 0x602: 0xfd, 0x603: 0xfd, 0x604: 0x17c, 0x605: 0x17d, 0x606: 0xfd, 0x607: 0xfd,
+ 0x608: 0xfd, 0x609: 0xfd, 0x60a: 0xfd, 0x60b: 0x17e, 0x60c: 0xfd, 0x60d: 0xfd, 0x60e: 0xfd, 0x60f: 0xfd,
+ 0x610: 0xfd, 0x611: 0xfd, 0x612: 0xfd, 0x613: 0xfd, 0x614: 0xfd, 0x615: 0xfd, 0x616: 0xfd, 0x617: 0xfd,
+ 0x618: 0xfd, 0x619: 0xfd, 0x61a: 0xfd, 0x61b: 0xfd, 0x61c: 0xfd, 0x61d: 0xfd, 0x61e: 0xfd, 0x61f: 0xfd,
+ 0x620: 0x125, 0x621: 0x125, 0x622: 0x125, 0x623: 0x17f, 0x624: 0x6e, 0x625: 0x180, 0x626: 0xfd, 0x627: 0xfd,
+ 0x628: 0xfd, 0x629: 0xfd, 0x62a: 0xfd, 0x62b: 0xfd, 0x62c: 0xfd, 0x62d: 0xfd, 0x62e: 0xfd, 0x62f: 0xfd,
+ 0x630: 0xfd, 0x631: 0x181, 0x632: 0x182, 0x633: 0xfd, 0x634: 0x183, 0x635: 0xfd, 0x636: 0xfd, 0x637: 0xfd,
+ 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x184, 0x63c: 0xfd, 0x63d: 0xfd, 0x63e: 0xfd, 0x63f: 0xfd,
+ // Block 0x19, offset 0x640
+ 0x640: 0x185, 0x641: 0x9c, 0x642: 0x186, 0x643: 0x187, 0x644: 0x72, 0x645: 0x73, 0x646: 0x188, 0x647: 0x189,
+ 0x648: 0x74, 0x649: 0x18a, 0x64a: 0xfd, 0x64b: 0xfd, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c,
+ 0x650: 0x9c, 0x651: 0x9c, 0x652: 0x9c, 0x653: 0x9c, 0x654: 0x9c, 0x655: 0x9c, 0x656: 0x9c, 0x657: 0x9c,
+ 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x18b, 0x65c: 0x9c, 0x65d: 0x18c, 0x65e: 0x9c, 0x65f: 0x18d,
+ 0x660: 0x18e, 0x661: 0x18f, 0x662: 0x190, 0x663: 0xfd, 0x664: 0x9c, 0x665: 0x191, 0x666: 0x9c, 0x667: 0x192,
+ 0x668: 0x9c, 0x669: 0x193, 0x66a: 0x194, 0x66b: 0x195, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x196, 0x66f: 0x197,
+ 0x670: 0xfd, 0x671: 0xfd, 0x672: 0xfd, 0x673: 0xfd, 0x674: 0xfd, 0x675: 0xfd, 0x676: 0xfd, 0x677: 0xfd,
+ 0x678: 0xfd, 0x679: 0xfd, 0x67a: 0xfd, 0x67b: 0xfd, 0x67c: 0xfd, 0x67d: 0xfd, 0x67e: 0xfd, 0x67f: 0xfd,
+ // Block 0x1a, offset 0x680
+ 0x680: 0xa0, 0x681: 0xa0, 0x682: 0xa0, 0x683: 0xa0, 0x684: 0xa0, 0x685: 0xa0, 0x686: 0xa0, 0x687: 0xa0,
+ 0x688: 0xa0, 0x689: 0xa0, 0x68a: 0xa0, 0x68b: 0xa0, 0x68c: 0xa0, 0x68d: 0xa0, 0x68e: 0xa0, 0x68f: 0xa0,
+ 0x690: 0xa0, 0x691: 0xa0, 0x692: 0xa0, 0x693: 0xa0, 0x694: 0xa0, 0x695: 0xa0, 0x696: 0xa0, 0x697: 0xa0,
+ 0x698: 0xa0, 0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x198, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0,
+ 0x6a0: 0xa0, 0x6a1: 0xa0, 0x6a2: 0xa0, 0x6a3: 0xa0, 0x6a4: 0xa0, 0x6a5: 0xa0, 0x6a6: 0xa0, 0x6a7: 0xa0,
+ 0x6a8: 0xa0, 0x6a9: 0xa0, 0x6aa: 0xa0, 0x6ab: 0xa0, 0x6ac: 0xa0, 0x6ad: 0xa0, 0x6ae: 0xa0, 0x6af: 0xa0,
+ 0x6b0: 0xa0, 0x6b1: 0xa0, 0x6b2: 0xa0, 0x6b3: 0xa0, 0x6b4: 0xa0, 0x6b5: 0xa0, 0x6b6: 0xa0, 0x6b7: 0xa0,
+ 0x6b8: 0xa0, 0x6b9: 0xa0, 0x6ba: 0xa0, 0x6bb: 0xa0, 0x6bc: 0xa0, 0x6bd: 0xa0, 0x6be: 0xa0, 0x6bf: 0xa0,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0xa0, 0x6c1: 0xa0, 0x6c2: 0xa0, 0x6c3: 0xa0, 0x6c4: 0xa0, 0x6c5: 0xa0, 0x6c6: 0xa0, 0x6c7: 0xa0,
+ 0x6c8: 0xa0, 0x6c9: 0xa0, 0x6ca: 0xa0, 0x6cb: 0xa0, 0x6cc: 0xa0, 0x6cd: 0xa0, 0x6ce: 0xa0, 0x6cf: 0xa0,
+ 0x6d0: 0xa0, 0x6d1: 0xa0, 0x6d2: 0xa0, 0x6d3: 0xa0, 0x6d4: 0xa0, 0x6d5: 0xa0, 0x6d6: 0xa0, 0x6d7: 0xa0,
+ 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x199, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0,
+ 0x6e0: 0x19a, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0,
+ 0x6e8: 0xa0, 0x6e9: 0xa0, 0x6ea: 0xa0, 0x6eb: 0xa0, 0x6ec: 0xa0, 0x6ed: 0xa0, 0x6ee: 0xa0, 0x6ef: 0xa0,
+ 0x6f0: 0xa0, 0x6f1: 0xa0, 0x6f2: 0xa0, 0x6f3: 0xa0, 0x6f4: 0xa0, 0x6f5: 0xa0, 0x6f6: 0xa0, 0x6f7: 0xa0,
+ 0x6f8: 0xa0, 0x6f9: 0xa0, 0x6fa: 0xa0, 0x6fb: 0xa0, 0x6fc: 0xa0, 0x6fd: 0xa0, 0x6fe: 0xa0, 0x6ff: 0xa0,
+ // Block 0x1c, offset 0x700
+ 0x700: 0xa0, 0x701: 0xa0, 0x702: 0xa0, 0x703: 0xa0, 0x704: 0xa0, 0x705: 0xa0, 0x706: 0xa0, 0x707: 0xa0,
+ 0x708: 0xa0, 0x709: 0xa0, 0x70a: 0xa0, 0x70b: 0xa0, 0x70c: 0xa0, 0x70d: 0xa0, 0x70e: 0xa0, 0x70f: 0xa0,
+ 0x710: 0xa0, 0x711: 0xa0, 0x712: 0xa0, 0x713: 0xa0, 0x714: 0xa0, 0x715: 0xa0, 0x716: 0xa0, 0x717: 0xa0,
+ 0x718: 0xa0, 0x719: 0xa0, 0x71a: 0xa0, 0x71b: 0xa0, 0x71c: 0xa0, 0x71d: 0xa0, 0x71e: 0xa0, 0x71f: 0xa0,
+ 0x720: 0xa0, 0x721: 0xa0, 0x722: 0xa0, 0x723: 0xa0, 0x724: 0xa0, 0x725: 0xa0, 0x726: 0xa0, 0x727: 0xa0,
+ 0x728: 0xa0, 0x729: 0xa0, 0x72a: 0xa0, 0x72b: 0xa0, 0x72c: 0xa0, 0x72d: 0xa0, 0x72e: 0xa0, 0x72f: 0xa0,
+ 0x730: 0xa0, 0x731: 0xa0, 0x732: 0xa0, 0x733: 0xa0, 0x734: 0xa0, 0x735: 0xa0, 0x736: 0xa0, 0x737: 0xa0,
+ 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x19b, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0,
+ // Block 0x1d, offset 0x740
+ 0x740: 0xa0, 0x741: 0xa0, 0x742: 0xa0, 0x743: 0xa0, 0x744: 0xa0, 0x745: 0xa0, 0x746: 0xa0, 0x747: 0xa0,
+ 0x748: 0xa0, 0x749: 0xa0, 0x74a: 0xa0, 0x74b: 0xa0, 0x74c: 0xa0, 0x74d: 0xa0, 0x74e: 0xa0, 0x74f: 0xa0,
+ 0x750: 0xa0, 0x751: 0xa0, 0x752: 0xa0, 0x753: 0xa0, 0x754: 0xa0, 0x755: 0xa0, 0x756: 0xa0, 0x757: 0xa0,
+ 0x758: 0xa0, 0x759: 0xa0, 0x75a: 0xa0, 0x75b: 0xa0, 0x75c: 0xa0, 0x75d: 0xa0, 0x75e: 0xa0, 0x75f: 0xa0,
+ 0x760: 0xa0, 0x761: 0xa0, 0x762: 0xa0, 0x763: 0xa0, 0x764: 0xa0, 0x765: 0xa0, 0x766: 0xa0, 0x767: 0xa0,
+ 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19c,
+ 0x770: 0xfd, 0x771: 0xfd, 0x772: 0xfd, 0x773: 0xfd, 0x774: 0xfd, 0x775: 0xfd, 0x776: 0xfd, 0x777: 0xfd,
+ 0x778: 0xfd, 0x779: 0xfd, 0x77a: 0xfd, 0x77b: 0xfd, 0x77c: 0xfd, 0x77d: 0xfd, 0x77e: 0xfd, 0x77f: 0xfd,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xfd, 0x781: 0xfd, 0x782: 0xfd, 0x783: 0xfd, 0x784: 0xfd, 0x785: 0xfd, 0x786: 0xfd, 0x787: 0xfd,
+ 0x788: 0xfd, 0x789: 0xfd, 0x78a: 0xfd, 0x78b: 0xfd, 0x78c: 0xfd, 0x78d: 0xfd, 0x78e: 0xfd, 0x78f: 0xfd,
+ 0x790: 0xfd, 0x791: 0xfd, 0x792: 0xfd, 0x793: 0xfd, 0x794: 0xfd, 0x795: 0xfd, 0x796: 0xfd, 0x797: 0xfd,
+ 0x798: 0xfd, 0x799: 0xfd, 0x79a: 0xfd, 0x79b: 0xfd, 0x79c: 0xfd, 0x79d: 0xfd, 0x79e: 0xfd, 0x79f: 0xfd,
+ 0x7a0: 0x75, 0x7a1: 0x76, 0x7a2: 0x77, 0x7a3: 0x78, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x7b, 0x7a7: 0x7c,
+ 0x7a8: 0x7d, 0x7a9: 0xfd, 0x7aa: 0xfd, 0x7ab: 0xfd, 0x7ac: 0xfd, 0x7ad: 0xfd, 0x7ae: 0xfd, 0x7af: 0xfd,
+ 0x7b0: 0xfd, 0x7b1: 0xfd, 0x7b2: 0xfd, 0x7b3: 0xfd, 0x7b4: 0xfd, 0x7b5: 0xfd, 0x7b6: 0xfd, 0x7b7: 0xfd,
+ 0x7b8: 0xfd, 0x7b9: 0xfd, 0x7ba: 0xfd, 0x7bb: 0xfd, 0x7bc: 0xfd, 0x7bd: 0xfd, 0x7be: 0xfd, 0x7bf: 0xfd,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0xa0, 0x7c1: 0xa0, 0x7c2: 0xa0, 0x7c3: 0xa0, 0x7c4: 0xa0, 0x7c5: 0xa0, 0x7c6: 0xa0, 0x7c7: 0xa0,
+ 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfd, 0x7cf: 0xfd,
+ 0x7d0: 0xfd, 0x7d1: 0xfd, 0x7d2: 0xfd, 0x7d3: 0xfd, 0x7d4: 0xfd, 0x7d5: 0xfd, 0x7d6: 0xfd, 0x7d7: 0xfd,
+ 0x7d8: 0xfd, 0x7d9: 0xfd, 0x7da: 0xfd, 0x7db: 0xfd, 0x7dc: 0xfd, 0x7dd: 0xfd, 0x7de: 0xfd, 0x7df: 0xfd,
+ 0x7e0: 0xfd, 0x7e1: 0xfd, 0x7e2: 0xfd, 0x7e3: 0xfd, 0x7e4: 0xfd, 0x7e5: 0xfd, 0x7e6: 0xfd, 0x7e7: 0xfd,
+ 0x7e8: 0xfd, 0x7e9: 0xfd, 0x7ea: 0xfd, 0x7eb: 0xfd, 0x7ec: 0xfd, 0x7ed: 0xfd, 0x7ee: 0xfd, 0x7ef: 0xfd,
+ 0x7f0: 0xfd, 0x7f1: 0xfd, 0x7f2: 0xfd, 0x7f3: 0xfd, 0x7f4: 0xfd, 0x7f5: 0xfd, 0x7f6: 0xfd, 0x7f7: 0xfd,
+ 0x7f8: 0xfd, 0x7f9: 0xfd, 0x7fa: 0xfd, 0x7fb: 0xfd, 0x7fc: 0xfd, 0x7fd: 0xfd, 0x7fe: 0xfd, 0x7ff: 0xfd,
+ // Block 0x20, offset 0x800
+ 0x810: 0x0d, 0x811: 0x0e, 0x812: 0x0f, 0x813: 0x10, 0x814: 0x11, 0x815: 0x0b, 0x816: 0x12, 0x817: 0x07,
+ 0x818: 0x13, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x14, 0x81c: 0x0b, 0x81d: 0x15, 0x81e: 0x16, 0x81f: 0x17,
+ 0x820: 0x07, 0x821: 0x07, 0x822: 0x07, 0x823: 0x07, 0x824: 0x07, 0x825: 0x07, 0x826: 0x07, 0x827: 0x07,
+ 0x828: 0x07, 0x829: 0x07, 0x82a: 0x18, 0x82b: 0x19, 0x82c: 0x1a, 0x82d: 0x07, 0x82e: 0x1b, 0x82f: 0x1c,
+ 0x830: 0x07, 0x831: 0x1d, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b,
+ 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b,
+ 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b,
+ 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b,
+ 0x860: 0x0b, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b,
+ 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b,
+ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b,
+ 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b,
+ // Block 0x22, offset 0x880
+ 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfd, 0x883: 0xfd, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1,
+ 0x888: 0xfd, 0x889: 0xfd, 0x88a: 0xfd, 0x88b: 0xfd, 0x88c: 0xfd, 0x88d: 0xfd, 0x88e: 0xfd, 0x88f: 0xfd,
+ 0x890: 0xfd, 0x891: 0xfd, 0x892: 0xfd, 0x893: 0xfd, 0x894: 0xfd, 0x895: 0xfd, 0x896: 0xfd, 0x897: 0xfd,
+ 0x898: 0xfd, 0x899: 0xfd, 0x89a: 0xfd, 0x89b: 0xfd, 0x89c: 0xfd, 0x89d: 0xfd, 0x89e: 0xfd, 0x89f: 0xfd,
+ 0x8a0: 0xfd, 0x8a1: 0xfd, 0x8a2: 0xfd, 0x8a3: 0xfd, 0x8a4: 0xfd, 0x8a5: 0xfd, 0x8a6: 0xfd, 0x8a7: 0xfd,
+ 0x8a8: 0xfd, 0x8a9: 0xfd, 0x8aa: 0xfd, 0x8ab: 0xfd, 0x8ac: 0xfd, 0x8ad: 0xfd, 0x8ae: 0xfd, 0x8af: 0xfd,
+ 0x8b0: 0xfd, 0x8b1: 0xfd, 0x8b2: 0xfd, 0x8b3: 0xfd, 0x8b4: 0xfd, 0x8b5: 0xfd, 0x8b6: 0xfd, 0x8b7: 0xfd,
+ 0x8b8: 0xfd, 0x8b9: 0xfd, 0x8ba: 0xfd, 0x8bb: 0xfd, 0x8bc: 0xfd, 0x8bd: 0xfd, 0x8be: 0xfd, 0x8bf: 0xfd,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+ 0x8d0: 0x0b, 0x8d1: 0x0b, 0x8d2: 0x0b, 0x8d3: 0x0b, 0x8d4: 0x0b, 0x8d5: 0x0b, 0x8d6: 0x0b, 0x8d7: 0x0b,
+ 0x8d8: 0x0b, 0x8d9: 0x0b, 0x8da: 0x0b, 0x8db: 0x0b, 0x8dc: 0x0b, 0x8dd: 0x0b, 0x8de: 0x0b, 0x8df: 0x0b,
+ 0x8e0: 0x20, 0x8e1: 0x0b, 0x8e2: 0x0b, 0x8e3: 0x0b, 0x8e4: 0x0b, 0x8e5: 0x0b, 0x8e6: 0x0b, 0x8e7: 0x0b,
+ 0x8e8: 0x0b, 0x8e9: 0x0b, 0x8ea: 0x0b, 0x8eb: 0x0b, 0x8ec: 0x0b, 0x8ed: 0x0b, 0x8ee: 0x0b, 0x8ef: 0x0b,
+ 0x8f0: 0x0b, 0x8f1: 0x0b, 0x8f2: 0x0b, 0x8f3: 0x0b, 0x8f4: 0x0b, 0x8f5: 0x0b, 0x8f6: 0x0b, 0x8f7: 0x0b,
+ 0x8f8: 0x0b, 0x8f9: 0x0b, 0x8fa: 0x0b, 0x8fb: 0x0b, 0x8fc: 0x0b, 0x8fd: 0x0b, 0x8fe: 0x0b, 0x8ff: 0x0b,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0b, 0x901: 0x0b, 0x902: 0x0b, 0x903: 0x0b, 0x904: 0x0b, 0x905: 0x0b, 0x906: 0x0b, 0x907: 0x0b,
+ 0x908: 0x0b, 0x909: 0x0b, 0x90a: 0x0b, 0x90b: 0x0b, 0x90c: 0x0b, 0x90d: 0x0b, 0x90e: 0x0b, 0x90f: 0x0b,
+}
+
+// idnaSparseOffset: 292 entries, 584 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x32f, 0x331, 0x33a, 0x33d, 0x341, 0x346, 0x34b, 0x351, 0x362, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f8, 0x3fd, 0x40a, 0x40e, 0x413, 0x415, 0x417, 0x41b, 0x41d, 0x421, 0x42a, 0x430, 0x434, 0x444, 0x44e, 0x453, 0x456, 0x45c, 0x463, 0x468, 0x46c, 0x472, 0x477, 0x480, 0x485, 0x48b, 0x492, 0x499, 0x4a0, 0x4a4, 0x4a9, 0x4ac, 0x4b1, 0x4bd, 0x4c3, 0x4c8, 0x4cf, 0x4d7, 0x4dc, 0x4e0, 0x4f0, 0x4f7, 0x4fb, 0x4ff, 0x506, 0x508, 0x50b, 0x50e, 0x512, 0x51b, 0x51f, 0x527, 0x52f, 0x537, 0x543, 0x54f, 0x555, 0x55e, 0x56a, 0x571, 0x57a, 0x585, 0x58c, 0x59b, 0x5a8, 0x5b5, 0x5be, 0x5c2, 0x5d1, 0x5d9, 0x5e4, 0x5ed, 0x5f3, 0x5fb, 0x604, 0x60f, 0x612, 0x61e, 0x627, 0x62a, 0x62f, 0x638, 0x63d, 0x64a, 0x655, 0x65e, 0x668, 0x66b, 0x675, 0x67e, 0x68a, 0x697, 0x6a4, 0x6b2, 0x6b9, 0x6bd, 0x6c1, 0x6c4, 0x6c9, 0x6cc, 0x6d1, 0x6d4, 0x6db, 0x6e2, 0x6e6, 0x6f1, 0x6f4, 0x6f7, 0x6fa, 0x700, 0x706, 0x70f, 0x712, 0x715, 0x718, 0x71b, 0x722, 0x725, 0x72a, 0x734, 0x737, 0x73b, 0x74a, 0x756, 0x75a, 0x75f, 0x763, 0x768, 0x76c, 0x771, 0x77a, 0x785, 0x78b, 0x791, 0x797, 0x79d, 0x7a6, 0x7a9, 0x7ac, 0x7b0, 0x7b4, 0x7b8, 0x7be, 0x7c4, 0x7c9, 0x7cc, 0x7dc, 0x7e3, 0x7e6, 0x7eb, 0x7ef, 0x7f5, 0x7fc, 0x800, 0x804, 0x80d, 0x814, 0x819, 0x81d, 0x82b, 0x82e, 0x831, 0x835, 0x839, 0x83c, 0x83f, 0x844, 0x846, 0x848}
+
+// idnaSparseValues: 2123 entries, 8492 bytes
+var idnaSparseValues = [2123]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x00a9, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x00b1, lo: 0xb2, hi: 0xb2},
+ {value: 0x00b9, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x00c1, lo: 0xb7, hi: 0xb7},
+ {value: 0x00c9, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0131, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x01f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0201, lo: 0x9c, hi: 0x9c},
+ {value: 0x0209, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x02},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0xbf},
+ // Block 0x3e, offset 0x225
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x247
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x253
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x260
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x02a9, lo: 0x80, hi: 0x80},
+ {value: 0x02b1, lo: 0x81, hi: 0x81},
+ {value: 0x02b9, lo: 0x82, hi: 0x82},
+ {value: 0x02c1, lo: 0x83, hi: 0x83},
+ {value: 0x02c9, lo: 0x84, hi: 0x85},
+ {value: 0x02d1, lo: 0x86, hi: 0x86},
+ {value: 0x02d9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x059d, lo: 0x90, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x059d, lo: 0xbd, hi: 0xbf},
+ // Block 0x45, offset 0x26d
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x46, offset 0x27e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x282
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x28d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x291
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x0851, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x29a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0859, lo: 0xac, hi: 0xac},
+ {value: 0x0861, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x0869, lo: 0xaf, hi: 0xaf},
+ {value: 0x0871, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x2a2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09dd, lo: 0xa9, hi: 0xa9},
+ {value: 0x09fd, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2ad
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2b0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0929, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b4
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e7e, lo: 0xb4, hi: 0xb4},
+ {value: 0x0932, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e9e, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2ba
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x0939, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2be
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2c2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0xbf},
+ // Block 0x53, offset 0x2c6
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ebd, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2cc
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x56, offset 0x2db
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2e6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x58, offset 0x2f0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x59, offset 0x2f4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xbf},
+ // Block 0x5a, offset 0x2f7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0ef5, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x2fd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0f15, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5c, offset 0x301
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f35, lo: 0x80, hi: 0xbf},
+ // Block 0x5d, offset 0x303
+ {value: 0x0020, lo: 0x02},
+ {value: 0x1735, lo: 0x80, hi: 0x8f},
+ {value: 0x1915, lo: 0x90, hi: 0xbf},
+ // Block 0x5e, offset 0x306
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1f15, lo: 0x80, hi: 0xbf},
+ // Block 0x5f, offset 0x308
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x60, offset 0x30b
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x096a, lo: 0x9b, hi: 0x9b},
+ {value: 0x0972, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x0979, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x61, offset 0x315
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x0981, lo: 0xbf, hi: 0xbf},
+ // Block 0x62, offset 0x318
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a35, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a55, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a75, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a95, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a75, lo: 0xb5, hi: 0xb5},
+ {value: 0x2ab5, lo: 0xb6, hi: 0xb6},
+ {value: 0x2ad5, lo: 0xb7, hi: 0xb7},
+ {value: 0x2af5, lo: 0xb8, hi: 0xb9},
+ {value: 0x2b15, lo: 0xba, hi: 0xbb},
+ {value: 0x2b35, lo: 0xbc, hi: 0xbd},
+ {value: 0x2b15, lo: 0xbe, hi: 0xbf},
+ // Block 0x63, offset 0x327
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0008, lo: 0x03},
+ {value: 0x098a, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0a82, lo: 0xa0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0008, lo: 0x01},
+ {value: 0x0d19, lo: 0x80, hi: 0xbf},
+ // Block 0x66, offset 0x331
+ {value: 0x0008, lo: 0x08},
+ {value: 0x0f19, lo: 0x80, hi: 0xb0},
+ {value: 0x4045, lo: 0xb1, hi: 0xb1},
+ {value: 0x10a1, lo: 0xb2, hi: 0xb3},
+ {value: 0x4065, lo: 0xb4, hi: 0xb4},
+ {value: 0x10b1, lo: 0xb5, hi: 0xb7},
+ {value: 0x4085, lo: 0xb8, hi: 0xb8},
+ {value: 0x4085, lo: 0xb9, hi: 0xb9},
+ {value: 0x10c9, lo: 0xba, hi: 0xbf},
+ // Block 0x67, offset 0x33a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x68, offset 0x33d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x69, offset 0x341
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x6a, offset 0x346
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6b, offset 0x34b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6c, offset 0x351
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0xe00d, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x83},
+ {value: 0x03f5, lo: 0x84, hi: 0x84},
+ {value: 0x0479, lo: 0x85, hi: 0x85},
+ {value: 0x447d, lo: 0x86, hi: 0x86},
+ {value: 0xe07d, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0xe01d, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0xb4},
+ {value: 0xe01d, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb7},
+ {value: 0x0741, lo: 0xb8, hi: 0xb8},
+ {value: 0x13f1, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x362
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x3b08, lo: 0xac, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6e, offset 0x372
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6f, offset 0x378
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x70, offset 0x37c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x71, offset 0x38b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x72, offset 0x390
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x398
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x74, offset 0x3a2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x75, offset 0x3ad
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x76, offset 0x3b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3c6
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x78, offset 0x3cf
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x79, offset 0x3df
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3ec
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x449d, lo: 0x9c, hi: 0x9c},
+ {value: 0x44b5, lo: 0x9d, hi: 0x9d},
+ {value: 0x0941, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa8},
+ {value: 0x13f9, lo: 0xa9, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x44cd, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3f8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44ed, lo: 0x80, hi: 0x8f},
+ {value: 0x450d, lo: 0x90, hi: 0x9f},
+ {value: 0x452d, lo: 0xa0, hi: 0xaf},
+ {value: 0x450d, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3fd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7d, offset 0x40a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7e, offset 0x40e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7f, offset 0x413
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x80, offset 0x415
+ {value: 0x0020, lo: 0x01},
+ {value: 0x454d, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x417
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d4d, lo: 0x80, hi: 0x94},
+ {value: 0x4b0d, lo: 0x95, hi: 0x95},
+ {value: 0x4fed, lo: 0x96, hi: 0xbf},
+ // Block 0x82, offset 0x41b
+ {value: 0x0020, lo: 0x01},
+ {value: 0x552d, lo: 0x80, hi: 0xbf},
+ // Block 0x83, offset 0x41d
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5d2d, lo: 0x80, hi: 0x84},
+ {value: 0x568d, lo: 0x85, hi: 0x85},
+ {value: 0x5dcd, lo: 0x86, hi: 0xbf},
+ // Block 0x84, offset 0x421
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b8d, lo: 0x80, hi: 0x8f},
+ {value: 0x6d4d, lo: 0x90, hi: 0x90},
+ {value: 0x6d8d, lo: 0x91, hi: 0xab},
+ {value: 0x1401, lo: 0xac, hi: 0xac},
+ {value: 0x70ed, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x710d, lo: 0xb0, hi: 0xbf},
+ // Block 0x85, offset 0x42a
+ {value: 0x0020, lo: 0x05},
+ {value: 0x730d, lo: 0x80, hi: 0xad},
+ {value: 0x656d, lo: 0xae, hi: 0xae},
+ {value: 0x78cd, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f8d, lo: 0xb6, hi: 0xb6},
+ {value: 0x79ad, lo: 0xb7, hi: 0xbf},
+ // Block 0x86, offset 0x430
+ {value: 0x0008, lo: 0x03},
+ {value: 0x1751, lo: 0x80, hi: 0x82},
+ {value: 0x1741, lo: 0x83, hi: 0x83},
+ {value: 0x1769, lo: 0x84, hi: 0xbf},
+ // Block 0x87, offset 0x434
+ {value: 0x0008, lo: 0x0f},
+ {value: 0x1d81, lo: 0x80, hi: 0x83},
+ {value: 0x1d99, lo: 0x84, hi: 0x85},
+ {value: 0x1da1, lo: 0x86, hi: 0x87},
+ {value: 0x1da9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x1de9, lo: 0x92, hi: 0x97},
+ {value: 0x1e11, lo: 0x98, hi: 0x9c},
+ {value: 0x1e31, lo: 0x9d, hi: 0xb3},
+ {value: 0x1d71, lo: 0xb4, hi: 0xb4},
+ {value: 0x1d81, lo: 0xb5, hi: 0xb5},
+ {value: 0x1ee9, lo: 0xb6, hi: 0xbb},
+ {value: 0x1f09, lo: 0xbc, hi: 0xbc},
+ {value: 0x1ef9, lo: 0xbd, hi: 0xbd},
+ {value: 0x1f19, lo: 0xbe, hi: 0xbf},
+ // Block 0x88, offset 0x444
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x89, offset 0x44e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x8a, offset 0x453
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8b, offset 0x456
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8c, offset 0x45c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8d, offset 0x463
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8e, offset 0x468
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8f, offset 0x46c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x90, offset 0x472
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x91, offset 0x477
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x92, offset 0x480
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x93, offset 0x485
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x94, offset 0x48b
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8b0d, lo: 0x98, hi: 0x9f},
+ {value: 0x8b25, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x95, offset 0x492
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8b25, lo: 0xb0, hi: 0xb7},
+ {value: 0x8b0d, lo: 0xb8, hi: 0xbf},
+ // Block 0x96, offset 0x499
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x97, offset 0x4a0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x98, offset 0x4a4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x4a9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4ac
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x9b, offset 0x4b1
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9c, offset 0x4bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9d, offset 0x4c3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9e, offset 0x4c8
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9f, offset 0x4cf
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4d7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0xa1, offset 0x4dc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa2, offset 0x4e0
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa3, offset 0x4f0
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa4, offset 0x4f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa5, offset 0x4fb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa6, offset 0x4ff
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa7, offset 0x506
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa8, offset 0x508
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa9, offset 0x50b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xaa, offset 0x50e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xab, offset 0x512
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xac, offset 0x51b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xad, offset 0x51f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xac},
+ {value: 0x0818, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xae, offset 0x527
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xaf, offset 0x52f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xb0, offset 0x537
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb3},
+ {value: 0x0c08, lo: 0xb4, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb7},
+ {value: 0x0a08, lo: 0xb8, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xba},
+ {value: 0x0a08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0c08, lo: 0xbd, hi: 0xbd},
+ {value: 0x0a08, lo: 0xbe, hi: 0xbf},
+ // Block 0xb1, offset 0x543
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0x81},
+ {value: 0x0c08, lo: 0x82, hi: 0x83},
+ {value: 0x0a08, lo: 0x84, hi: 0x84},
+ {value: 0x0818, lo: 0x85, hi: 0x88},
+ {value: 0x0c18, lo: 0x89, hi: 0x89},
+ {value: 0x0a18, lo: 0x8a, hi: 0x8a},
+ {value: 0x0918, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb2, offset 0x54f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb3, offset 0x555
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x55e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb5, offset 0x56a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb6, offset 0x571
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb7, offset 0x57a
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb8, offset 0x585
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb9, offset 0x58c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x3008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xba, offset 0x59b
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x5a8
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xbc, offset 0x5b5
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xbd, offset 0x5be
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xbe, offset 0x5c2
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xbf, offset 0x5d1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xc0, offset 0x5d9
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xc1, offset 0x5e4
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc2, offset 0x5ed
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xc3, offset 0x5f3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc4, offset 0x5fb
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc5, offset 0x604
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc6, offset 0x60f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc7, offset 0x612
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc8, offset 0x61e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc9, offset 0x627
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xca, offset 0x62a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xcb, offset 0x62f
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xcc, offset 0x638
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xcd, offset 0x63d
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x99},
+ {value: 0x3308, lo: 0x9a, hi: 0x9b},
+ {value: 0x3008, lo: 0x9c, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xbf},
+ // Block 0xce, offset 0x64a
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xcf, offset 0x655
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xd0, offset 0x65e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xd1, offset 0x668
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd2, offset 0x66b
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xd3, offset 0x675
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xd4, offset 0x67e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xd5, offset 0x68a
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xd6, offset 0x697
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xd7, offset 0x6a4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd8, offset 0x6b2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd9, offset 0x6b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0xda, offset 0x6bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xdb, offset 0x6c1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xdc, offset 0x6c4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xdd, offset 0x6c9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xde, offset 0x6cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0340, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xdf, offset 0x6d1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xe0, offset 0x6d4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe1, offset 0x6db
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe2, offset 0x6e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xe3, offset 0x6e6
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xe4, offset 0x6f1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xe5, offset 0x6f4
+ {value: 0x0000, lo: 0x02},
+ {value: 0xe105, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xe6, offset 0x6f7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xe7, offset 0x6fa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbf},
+ // Block 0xe8, offset 0x700
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe9, offset 0x706
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xea, offset 0x70f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xeb, offset 0x712
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0xec, offset 0x715
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xed, offset 0x718
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xee, offset 0x71b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xa3},
+ {value: 0x0008, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xef, offset 0x722
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xf0, offset 0x725
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xf1, offset 0x72a
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xf2, offset 0x734
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xf3, offset 0x737
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xf4, offset 0x73b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0x2211, lo: 0x9e, hi: 0x9e},
+ {value: 0x2219, lo: 0x9f, hi: 0x9f},
+ {value: 0x2221, lo: 0xa0, hi: 0xa0},
+ {value: 0x2229, lo: 0xa1, hi: 0xa1},
+ {value: 0x2231, lo: 0xa2, hi: 0xa2},
+ {value: 0x2239, lo: 0xa3, hi: 0xa3},
+ {value: 0x2241, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xf5, offset 0x74a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0x2249, lo: 0xbb, hi: 0xbb},
+ {value: 0x2251, lo: 0xbc, hi: 0xbc},
+ {value: 0x2259, lo: 0xbd, hi: 0xbd},
+ {value: 0x2261, lo: 0xbe, hi: 0xbe},
+ {value: 0x2269, lo: 0xbf, hi: 0xbf},
+ // Block 0xf6, offset 0x756
+ {value: 0x0000, lo: 0x03},
+ {value: 0x2271, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xf7, offset 0x75a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xf8, offset 0x75f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf9, offset 0x763
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xfa, offset 0x768
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xfb, offset 0x76c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xfc, offset 0x771
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xfd, offset 0x77a
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xfe, offset 0x785
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xff, offset 0x78b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x100, offset 0x791
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x101, offset 0x797
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x102, offset 0x79d
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0b08, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x103, offset 0x7a6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0x104, offset 0x7a9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x105, offset 0x7ac
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0818, lo: 0x81, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x106, offset 0x7b0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0x107, offset 0x7b4
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x108, offset 0x7b8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x109, offset 0x7be
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x10a, offset 0x7c4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0x2491, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x10b, offset 0x7c9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0x10c, offset 0x7cc
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x2611, lo: 0x80, hi: 0x80},
+ {value: 0x2619, lo: 0x81, hi: 0x81},
+ {value: 0x2621, lo: 0x82, hi: 0x82},
+ {value: 0x2629, lo: 0x83, hi: 0x83},
+ {value: 0x2631, lo: 0x84, hi: 0x84},
+ {value: 0x2639, lo: 0x85, hi: 0x85},
+ {value: 0x2641, lo: 0x86, hi: 0x86},
+ {value: 0x2649, lo: 0x87, hi: 0x87},
+ {value: 0x2651, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x2659, lo: 0x90, hi: 0x90},
+ {value: 0x2661, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0x10d, offset 0x7dc
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x10e, offset 0x7e3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x10f, offset 0x7e6
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x110, offset 0x7eb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x111, offset 0x7ef
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x112, offset 0x7f5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0x113, offset 0x7fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0x114, offset 0x800
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x115, offset 0x804
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x116, offset 0x80d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x117, offset 0x814
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x118, offset 0x819
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0xbf},
+ // Block 0x119, offset 0x81d
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0018, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0xaf},
+ {value: 0x06e1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0049, lo: 0xb1, hi: 0xb1},
+ {value: 0x0029, lo: 0xb2, hi: 0xb2},
+ {value: 0x0031, lo: 0xb3, hi: 0xb3},
+ {value: 0x06e9, lo: 0xb4, hi: 0xb4},
+ {value: 0x06f1, lo: 0xb5, hi: 0xb5},
+ {value: 0x06f9, lo: 0xb6, hi: 0xb6},
+ {value: 0x0701, lo: 0xb7, hi: 0xb7},
+ {value: 0x0709, lo: 0xb8, hi: 0xb8},
+ {value: 0x0711, lo: 0xb9, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x11a, offset 0x82b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x11b, offset 0x82e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x11c, offset 0x831
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x11d, offset 0x835
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x11e, offset 0x839
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x11f, offset 0x83c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0xbf},
+ // Block 0x120, offset 0x83f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x121, offset 0x844
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x122, offset 0x846
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x123, offset 0x848
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 44953 bytes (43KiB); checksum: D51909DD
diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go
new file mode 100644
index 0000000..5ff05fe
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables15.0.0.go
@@ -0,0 +1,5144 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build go1.21
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "15.0.0"
+
+var mappings string = "" + // Size: 6704 bytes
+ " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" +
+ "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" +
+ "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" +
+ "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" +
+ "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" +
+ "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" +
+ "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" +
+ ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. ゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" +
+ "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" +
+ ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" +
+ "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" +
+ "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" +
+ "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" +
+ "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" +
+ "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" +
+ "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" +
+ "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" +
+ "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" +
+ "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" +
+ "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" +
+ "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" +
+ "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" +
+ "ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" +
+ "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" +
+ "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" +
+ "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" +
+ "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" +
+ "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" +
+ "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" +
+ "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" +
+ "ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" +
+ "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" +
+ "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" +
+ "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥ːˑʙɓʣꭦʥʤɖɗᶑɘɞʩɤɢ" +
+ "ɠʛʜɧʄʪʫꞎɮʎøɶɷɺɾʀʨʦꭧʧʈⱱʏʡʢʘǀǁǂ𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκ" +
+ "λμνξοστυψ∇∂ϝабгежзиклмпруфхцчшыэюꚉәіјөүӏґѕџҫꙑұٮڡٯ0,1,2,3,4,5,6,7,8,9,(a" +
+ ")(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y" +
+ ")(z)〔s〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申" +
+ "割営配〔本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉" +
+ "卑博即卽卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢" +
+ "㠯巽帨帽幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最" +
+ "暜肭䏙朗望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸" +
+ "瑇瑜瑱璅瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦" +
+ "若茝荣莭茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷" +
+ "䧦雃嶲霣䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻"
+
+var mappingIndex = []uint16{ // 1729 elements
+ // Entry 0 - 3F
+ 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a,
+ 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024,
+ 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 0x0035, 0x0036,
+ 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048,
+ 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e,
+ 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086,
+ 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6,
+ 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6,
+ // Entry 40 - 7F
+ 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116,
+ 0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c,
+ 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174,
+ 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182,
+ 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c,
+ 0x018d, 0x018e, 0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199,
+ 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8,
+ 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6,
+ // Entry 80 - BF
+ 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6,
+ 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6,
+ 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5,
+ 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5,
+ 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211,
+ 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239,
+ 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261,
+ 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287,
+ // Entry C0 - FF
+ 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa,
+ 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8,
+ 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8,
+ 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f,
+ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+ 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326,
+ 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340,
+ 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368,
+ // Entry 100 - 13F
+ 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386,
+ 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1,
+ 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1,
+ 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db,
+ 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401,
+ 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417,
+ 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441,
+ 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469,
+ // Entry 140 - 17F
+ 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491,
+ 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc,
+ 0x04c4, 0x04c9, 0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7,
+ 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f,
+ 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537,
+ 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f,
+ 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e,
+ 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e,
+ // Entry 180 - 1BF
+ 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6,
+ 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6,
+ 0x05b8, 0x05ba, 0x05bc, 0x05be, 0x05c0, 0x05c4, 0x05c8, 0x05cc,
+ 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee,
+ 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a,
+ 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e,
+ 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf,
+ 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738,
+ // Entry 1C0 - 1FF
+ 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c,
+ 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce,
+ 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822,
+ 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873,
+ 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be,
+ 0x08c7, 0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f,
+ 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d,
+ 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997,
+ // Entry 200 - 23F
+ 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9,
+ 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1,
+ 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00,
+ 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a,
+ 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c,
+ 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c,
+ 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f,
+ 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 0x0a78, 0x0a7a, 0x0a7c, 0x0a7e,
+ // Entry 240 - 27F
+ 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e,
+ 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2,
+ 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2,
+ 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2,
+ 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6,
+ 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e,
+ 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46,
+ 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c,
+ // Entry 280 - 2BF
+ 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a,
+ 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92,
+ 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8,
+ 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8,
+ 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8,
+ 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08,
+ 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28,
+ 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e,
+ // Entry 2C0 - 2FF
+ 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e,
+ 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e,
+ 0x0c60, 0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e,
+ 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e,
+ 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a,
+ 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8,
+ 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8,
+ 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8,
+ // Entry 300 - 33F
+ 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18,
+ 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38,
+ 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58,
+ 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78,
+ 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 0x0d8c, 0x0d90, 0x0d94, 0x0d98,
+ 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8,
+ 0x0dbc, 0x0dc0, 0x0dc4, 0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8,
+ 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8,
+ // Entry 340 - 37F
+ 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18,
+ 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e,
+ 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e,
+ 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e,
+ 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e,
+ 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2,
+ 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4,
+ 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04,
+ // Entry 380 - 3BF
+ 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c, 0x0f20, 0x0f24,
+ 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a,
+ 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a,
+ 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa,
+ 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda,
+ 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a,
+ 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a,
+ 0x1040, 0x1046, 0x104c, 0x1052, 0x1058, 0x105e, 0x1064, 0x106a,
+ // Entry 3C0 - 3FF
+ 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a,
+ 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca,
+ 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa,
+ 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a,
+ 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a,
+ 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190,
+ 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee,
+ 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6,
+ // Entry 400 - 43F
+ 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe,
+ 0x11ff, 0x1200, 0x1201, 0x1205, 0x1209, 0x120d, 0x1211, 0x1215,
+ 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227,
+ 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237,
+ 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247,
+ 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257,
+ 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f,
+ 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279,
+ // Entry 440 - 47F
+ 0x127b, 0x127d, 0x127f, 0x1281, 0x1283, 0x1285, 0x1287, 0x1289,
+ 0x128c, 0x128e, 0x1290, 0x1292, 0x1294, 0x1297, 0x1299, 0x129b,
+ 0x129d, 0x129f, 0x12a1, 0x12a3, 0x12a5, 0x12a7, 0x12a9, 0x12ab,
+ 0x12ad, 0x12af, 0x12b2, 0x12b4, 0x12b6, 0x12b8, 0x12ba, 0x12bc,
+ 0x12be, 0x12c0, 0x12c2, 0x12c4, 0x12c6, 0x12c9, 0x12cb, 0x12cd,
+ 0x12d0, 0x12d2, 0x12d4, 0x12d6, 0x12d8, 0x12da, 0x12dc, 0x12de,
+ 0x12e6, 0x12ee, 0x12fa, 0x1306, 0x1312, 0x131e, 0x132a, 0x1332,
+ 0x133a, 0x1346, 0x1352, 0x135e, 0x136a, 0x136c, 0x136e, 0x1370,
+ // Entry 480 - 4BF
+ 0x1372, 0x1374, 0x1376, 0x1378, 0x137a, 0x137c, 0x137e, 0x1380,
+ 0x1382, 0x1384, 0x1386, 0x1388, 0x138a, 0x138d, 0x1390, 0x1392,
+ 0x1394, 0x1396, 0x1398, 0x139a, 0x139c, 0x139e, 0x13a0, 0x13a2,
+ 0x13a4, 0x13a6, 0x13a8, 0x13aa, 0x13ac, 0x13ae, 0x13b0, 0x13b2,
+ 0x13b4, 0x13b6, 0x13b8, 0x13ba, 0x13bc, 0x13bf, 0x13c1, 0x13c3,
+ 0x13c5, 0x13c7, 0x13c9, 0x13cb, 0x13cd, 0x13cf, 0x13d1, 0x13d3,
+ 0x13d6, 0x13d8, 0x13da, 0x13dc, 0x13de, 0x13e0, 0x13e2, 0x13e4,
+ 0x13e6, 0x13e8, 0x13ea, 0x13ec, 0x13ee, 0x13f0, 0x13f2, 0x13f5,
+ // Entry 4C0 - 4FF
+ 0x13f8, 0x13fb, 0x13fe, 0x1401, 0x1404, 0x1407, 0x140a, 0x140d,
+ 0x1410, 0x1413, 0x1416, 0x1419, 0x141c, 0x141f, 0x1422, 0x1425,
+ 0x1428, 0x142b, 0x142e, 0x1431, 0x1434, 0x1437, 0x143a, 0x143d,
+ 0x1440, 0x1447, 0x1449, 0x144b, 0x144d, 0x1450, 0x1452, 0x1454,
+ 0x1456, 0x1458, 0x145a, 0x1460, 0x1466, 0x1469, 0x146c, 0x146f,
+ 0x1472, 0x1475, 0x1478, 0x147b, 0x147e, 0x1481, 0x1484, 0x1487,
+ 0x148a, 0x148d, 0x1490, 0x1493, 0x1496, 0x1499, 0x149c, 0x149f,
+ 0x14a2, 0x14a5, 0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7,
+ // Entry 500 - 53F
+ 0x14ba, 0x14bd, 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf,
+ 0x14d2, 0x14d5, 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7,
+ 0x14ea, 0x14ed, 0x14f6, 0x14ff, 0x1508, 0x1511, 0x151a, 0x1523,
+ 0x152c, 0x1535, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d,
+ 0x1550, 0x1553, 0x1556, 0x1559, 0x155c, 0x155f, 0x1562, 0x1565,
+ 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d,
+ 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595,
+ 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad,
+ // Entry 540 - 57F
+ 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5,
+ 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd,
+ 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5,
+ 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d,
+ 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625,
+ 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d,
+ 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655,
+ 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d,
+ // Entry 580 - 5BF
+ 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685,
+ 0x1688, 0x168b, 0x168e, 0x1691, 0x1694, 0x1697, 0x169a, 0x169d,
+ 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5,
+ 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd,
+ 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5,
+ 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd,
+ 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715,
+ 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d,
+ // Entry 5C0 - 5FF
+ 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745,
+ 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d,
+ 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775,
+ 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d,
+ 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5,
+ 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd,
+ 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 0x17d2, 0x17d5,
+ 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed,
+ // Entry 600 - 63F
+ 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805,
+ 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d,
+ 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835,
+ 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d,
+ 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865,
+ 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d,
+ 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895,
+ 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad,
+ // Entry 640 - 67F
+ 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5,
+ 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd,
+ 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5,
+ 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d,
+ 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925,
+ 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d,
+ 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 0x194f, 0x1952, 0x1955,
+ 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d,
+ // Entry 680 - 6BF
+ 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 0x1985,
+ 0x1988, 0x198b, 0x198e, 0x1991, 0x1994, 0x1997, 0x199a, 0x199d,
+ 0x19a0, 0x19a3, 0x19a6, 0x19a9, 0x19ac, 0x19af, 0x19b2, 0x19b5,
+ 0x19b8, 0x19bb, 0x19be, 0x19c1, 0x19c4, 0x19c7, 0x19ca, 0x19cd,
+ 0x19d0, 0x19d3, 0x19d6, 0x19d9, 0x19dc, 0x19df, 0x19e2, 0x19e5,
+ 0x19e8, 0x19eb, 0x19ee, 0x19f1, 0x19f4, 0x19f7, 0x19fa, 0x19fd,
+ 0x1a00, 0x1a03, 0x1a06, 0x1a09, 0x1a0c, 0x1a0f, 0x1a12, 0x1a15,
+ 0x1a18, 0x1a1b, 0x1a1e, 0x1a21, 0x1a24, 0x1a27, 0x1a2a, 0x1a2d,
+ // Entry 6C0 - 6FF
+ 0x1a30,
+} // Size: 3482 bytes
+
+var xorData string = "" + // Size: 4907 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" +
+ "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" +
+ "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" +
+ "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" +
+ "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" +
+ "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" +
+ "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" +
+ "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" +
+ "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" +
+ "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" +
+ "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" +
+ "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" +
+ "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" +
+ "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" +
+ "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" +
+ "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" +
+ "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" +
+ "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" +
+ "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" +
+ "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" +
+ "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" +
+ "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" +
+ "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" +
+ "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" +
+ "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" +
+ "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" +
+ "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" +
+ "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" +
+ "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" +
+ "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" +
+ "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" +
+ "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" +
+ "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" +
+ "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" +
+ "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" +
+ "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" +
+ "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" +
+ "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" +
+ "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" +
+ "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" +
+ "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" +
+ "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" +
+ "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" +
+ "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" +
+ "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" +
+ "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" +
+ "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" +
+ "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" +
+ "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." +
+ "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" +
+ "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" +
+ "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" +
+ "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" +
+ "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" +
+ "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" +
+ "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" +
+ "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03" +
+ "\x09\x0c\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06" +
+ "!3\x03\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05" +
+ "\x03\x07<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" +
+ "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" +
+ "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" +
+ "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" +
+ "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" +
+ "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" +
+ ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" +
+ "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" +
+ "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" +
+ "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" +
+ "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" +
+ "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" +
+ "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" +
+ "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" +
+ "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" +
+ "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" +
+ "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" +
+ "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" +
+ ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x03'\x02\x03)\x02\x03+" +
+ "\x02\x03/\x02\x03\x19\x02\x03\x1b\x02\x03\x1f\x03\x0d\x22\x18\x03\x0d" +
+ "\x22\x1a\x03\x0d\x22'\x03\x0d\x22/\x03\x0d\x223\x03\x0d\x22$\x02\x01\x1e" +
+ "\x03\x0f$!\x03\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08" +
+ "\x18\x03\x0f\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$" +
+ "\x03\x0e\x0d)\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d" +
+ "\x03\x0d. \x03\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03" +
+ "\x0d\x0d\x0f\x03\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03" +
+ "\x0c\x09:\x03\x0e\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18" +
+ "\x03\x0c\x1f\x1c\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03" +
+ "\x0b<+\x03\x0b8\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d" +
+ "\x22&\x03\x0b\x1a\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03" +
+ "\x0a!\x1a\x03\x0a!7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03" +
+ "\x0a\x00 \x03\x0a\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a" +
+ "\x1b-\x03\x09-\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091" +
+ "\x1f\x03\x093\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(" +
+ "\x16\x03\x09\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!" +
+ "\x03\x09\x1a\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03" +
+ "\x08\x02*\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03" +
+ "\x070\x0c\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x06" +
+ "71\x03\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 " +
+ "\x1d\x03\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 31598 bytes (30.86 KiB). Checksum: d3118eda0d6b5360.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 133:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 133
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 135 blocks, 8640 entries, 17280 bytes
+// The third block is the zero block.
+var idnaValues = [8640]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018,
+ 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018,
+ 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091,
+ 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0818,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139,
+ 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08,
+ 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0c08, 0x571: 0x0c08, 0x572: 0x0c08, 0x573: 0x0c08, 0x574: 0x0c08, 0x575: 0x0c08,
+ 0x576: 0x0c08, 0x577: 0x0c08, 0x578: 0x0c08, 0x579: 0x0c08, 0x57a: 0x0c08, 0x57b: 0x0c08,
+ 0x57c: 0x0c08, 0x57d: 0x0c08, 0x57e: 0x0c08, 0x57f: 0x0c08,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0c08, 0x581: 0x0c08, 0x582: 0x0c08, 0x583: 0x0808, 0x584: 0x0808, 0x585: 0x0808,
+ 0x586: 0x0a08, 0x587: 0x0808, 0x588: 0x0818, 0x589: 0x0a08, 0x58a: 0x0a08, 0x58b: 0x0a08,
+ 0x58c: 0x0a08, 0x58d: 0x0a08, 0x58e: 0x0c08, 0x58f: 0x0040, 0x590: 0x0840, 0x591: 0x0840,
+ 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x0040,
+ 0x598: 0x3308, 0x599: 0x3308, 0x59a: 0x3308, 0x59b: 0x3308, 0x59c: 0x3308, 0x59d: 0x3308,
+ 0x59e: 0x3308, 0x59f: 0x3308, 0x5a0: 0x0a08, 0x5a1: 0x0a08, 0x5a2: 0x0a08, 0x5a3: 0x0a08,
+ 0x5a4: 0x0a08, 0x5a5: 0x0a08, 0x5a6: 0x0a08, 0x5a7: 0x0a08, 0x5a8: 0x0a08, 0x5a9: 0x0a08,
+ 0x5aa: 0x0c08, 0x5ab: 0x0c08, 0x5ac: 0x0c08, 0x5ad: 0x0808, 0x5ae: 0x0c08, 0x5af: 0x0a08,
+ 0x5b0: 0x0a08, 0x5b1: 0x0c08, 0x5b2: 0x0c08, 0x5b3: 0x0a08, 0x5b4: 0x0a08, 0x5b5: 0x0a08,
+ 0x5b6: 0x0a08, 0x5b7: 0x0a08, 0x5b8: 0x0a08, 0x5b9: 0x0c08, 0x5ba: 0x0a08, 0x5bb: 0x0a08,
+ 0x5bc: 0x0a08, 0x5bd: 0x0a08, 0x5be: 0x0a08, 0x5bf: 0x0a08,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x3308,
+ 0x5c6: 0x3308, 0x5c7: 0x3308, 0x5c8: 0x3308, 0x5c9: 0x3008, 0x5ca: 0x3008, 0x5cb: 0x3008,
+ 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x3008, 0x5cf: 0x3008, 0x5d0: 0x0008, 0x5d1: 0x3308,
+ 0x5d2: 0x3308, 0x5d3: 0x3308, 0x5d4: 0x3308, 0x5d5: 0x3308, 0x5d6: 0x3308, 0x5d7: 0x3308,
+ 0x5d8: 0x0159, 0x5d9: 0x0161, 0x5da: 0x0169, 0x5db: 0x0171, 0x5dc: 0x0179, 0x5dd: 0x0181,
+ 0x5de: 0x0189, 0x5df: 0x0191, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308,
+ 0x5e4: 0x0018, 0x5e5: 0x0018, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0018, 0x5f1: 0x0008, 0x5f2: 0x0008, 0x5f3: 0x0008, 0x5f4: 0x0008, 0x5f5: 0x0008,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0008, 0x5fb: 0x0008,
+ 0x5fc: 0x0008, 0x5fd: 0x0008, 0x5fe: 0x0008, 0x5ff: 0x0008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0008, 0x601: 0x3308, 0x602: 0x3008, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008,
+ 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0008,
+ 0x60c: 0x0008, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008,
+ 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008,
+ 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008,
+ 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0040, 0x634: 0x0040, 0x635: 0x0040,
+ 0x636: 0x0008, 0x637: 0x0008, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040,
+ 0x63c: 0x3308, 0x63d: 0x0008, 0x63e: 0x3008, 0x63f: 0x3008,
+ // Block 0x19, offset 0x640
+ 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3308, 0x644: 0x3308, 0x645: 0x0040,
+ 0x646: 0x0040, 0x647: 0x3008, 0x648: 0x3008, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3008,
+ 0x64c: 0x3008, 0x64d: 0x3b08, 0x64e: 0x0008, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x3008,
+ 0x658: 0x0040, 0x659: 0x0040, 0x65a: 0x0040, 0x65b: 0x0040, 0x65c: 0x0199, 0x65d: 0x01a1,
+ 0x65e: 0x0040, 0x65f: 0x01a9, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x3308, 0x663: 0x3308,
+ 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0008, 0x672: 0x0018, 0x673: 0x0018, 0x674: 0x0018, 0x675: 0x0018,
+ 0x676: 0x0018, 0x677: 0x0018, 0x678: 0x0018, 0x679: 0x0018, 0x67a: 0x0018, 0x67b: 0x0018,
+ 0x67c: 0x0008, 0x67d: 0x0018, 0x67e: 0x3308, 0x67f: 0x0040,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008,
+ 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0040,
+ 0x68c: 0x0040, 0x68d: 0x0040, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0040,
+ 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008,
+ 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008,
+ 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008,
+ 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x01b1, 0x6b4: 0x0040, 0x6b5: 0x0008,
+ 0x6b6: 0x01b9, 0x6b7: 0x0040, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x3308, 0x6bd: 0x0040, 0x6be: 0x3008, 0x6bf: 0x3008,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x0040, 0x6c4: 0x0040, 0x6c5: 0x0040,
+ 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x0040, 0x6ca: 0x0040, 0x6cb: 0x3308,
+ 0x6cc: 0x3308, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0040, 0x6d1: 0x3308,
+ 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040,
+ 0x6d8: 0x0040, 0x6d9: 0x01c1, 0x6da: 0x01c9, 0x6db: 0x01d1, 0x6dc: 0x0008, 0x6dd: 0x0040,
+ 0x6de: 0x01d9, 0x6df: 0x0040, 0x6e0: 0x0040, 0x6e1: 0x0040, 0x6e2: 0x0040, 0x6e3: 0x0040,
+ 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x3308, 0x6f1: 0x3308, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0008, 0x6f5: 0x3308,
+ 0x6f6: 0x0018, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0040, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008,
+ 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008,
+ 0x70c: 0x0008, 0x70d: 0x0008, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0008,
+ 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008,
+ 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008,
+ 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008,
+ 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008,
+ 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040,
+ 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3008,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x3308,
+ 0x746: 0x0040, 0x747: 0x3308, 0x748: 0x3308, 0x749: 0x3008, 0x74a: 0x0040, 0x74b: 0x3008,
+ 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x0040, 0x757: 0x0040,
+ 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0040, 0x75d: 0x0040,
+ 0x75e: 0x0040, 0x75f: 0x0040, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308,
+ 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0018, 0x771: 0x0018, 0x772: 0x0040, 0x773: 0x0040, 0x774: 0x0040, 0x775: 0x0040,
+ 0x776: 0x0040, 0x777: 0x0040, 0x778: 0x0040, 0x779: 0x0008, 0x77a: 0x3308, 0x77b: 0x3308,
+ 0x77c: 0x3308, 0x77d: 0x3308, 0x77e: 0x3308, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0040, 0x781: 0x3308, 0x782: 0x3008, 0x783: 0x3008, 0x784: 0x0040, 0x785: 0x0008,
+ 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0008,
+ 0x78c: 0x0008, 0x78d: 0x0040, 0x78e: 0x0040, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0008, 0x797: 0x0008,
+ 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0008, 0x79c: 0x0008, 0x79d: 0x0008,
+ 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x0008, 0x7a3: 0x0008,
+ 0x7a4: 0x0008, 0x7a5: 0x0008, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0040,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0008, 0x7b1: 0x0040, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0040, 0x7b5: 0x0008,
+ 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x3308, 0x7bd: 0x0008, 0x7be: 0x3008, 0x7bf: 0x3308,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x3008, 0x7c1: 0x3308, 0x7c2: 0x3308, 0x7c3: 0x3308, 0x7c4: 0x3308, 0x7c5: 0x0040,
+ 0x7c6: 0x0040, 0x7c7: 0x3008, 0x7c8: 0x3008, 0x7c9: 0x0040, 0x7ca: 0x0040, 0x7cb: 0x3008,
+ 0x7cc: 0x3008, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040,
+ 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x3008,
+ 0x7d8: 0x0040, 0x7d9: 0x0040, 0x7da: 0x0040, 0x7db: 0x0040, 0x7dc: 0x01e1, 0x7dd: 0x01e9,
+ 0x7de: 0x0040, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308,
+ 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0018, 0x7f1: 0x0008, 0x7f2: 0x0018, 0x7f3: 0x0018, 0x7f4: 0x0018, 0x7f5: 0x0018,
+ 0x7f6: 0x0018, 0x7f7: 0x0018, 0x7f8: 0x0040, 0x7f9: 0x0040, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x0040, 0x7ff: 0x0040,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0040, 0x801: 0x0040, 0x802: 0x3308, 0x803: 0x0008, 0x804: 0x0040, 0x805: 0x0008,
+ 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0040,
+ 0x80c: 0x0040, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040,
+ 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0040, 0x817: 0x0040,
+ 0x818: 0x0040, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0008, 0x81d: 0x0040,
+ 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0040, 0x821: 0x0040, 0x822: 0x0040, 0x823: 0x0008,
+ 0x824: 0x0008, 0x825: 0x0040, 0x826: 0x0040, 0x827: 0x0040, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0040, 0x82c: 0x0040, 0x82d: 0x0040, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0008, 0x835: 0x0008,
+ 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040,
+ 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x3008, 0x83f: 0x3008,
+ // Block 0x21, offset 0x840
+ 0x840: 0x3308, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040,
+ 0x846: 0x3308, 0x847: 0x3308, 0x848: 0x3308, 0x849: 0x0040, 0x84a: 0x3308, 0x84b: 0x3308,
+ 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040,
+ 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3308, 0x856: 0x3308, 0x857: 0x0040,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0008,
+ 0x85e: 0x0040, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308,
+ 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0040, 0x871: 0x0040, 0x872: 0x0040, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040,
+ 0x876: 0x0040, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0018, 0x87b: 0x0018,
+ 0x87c: 0x0018, 0x87d: 0x0018, 0x87e: 0x0018, 0x87f: 0x0018,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0008, 0x881: 0x3308, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x0018, 0x885: 0x0008,
+ 0x886: 0x0008, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0008, 0x88a: 0x0008, 0x88b: 0x0008,
+ 0x88c: 0x0008, 0x88d: 0x0040, 0x88e: 0x0008, 0x88f: 0x0008, 0x890: 0x0008, 0x891: 0x0040,
+ 0x892: 0x0008, 0x893: 0x0008, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008,
+ 0x898: 0x0008, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008,
+ 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008,
+ 0x8a4: 0x0008, 0x8a5: 0x0008, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0040,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0008, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0008, 0x8b4: 0x0040, 0x8b5: 0x0008,
+ 0x8b6: 0x0008, 0x8b7: 0x0008, 0x8b8: 0x0008, 0x8b9: 0x0008, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x3308, 0x8bd: 0x0008, 0x8be: 0x3008, 0x8bf: 0x3308,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3008, 0x8c2: 0x3008, 0x8c3: 0x3008, 0x8c4: 0x3008, 0x8c5: 0x0040,
+ 0x8c6: 0x3308, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3308, 0x8cd: 0x3b08, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0040, 0x8d5: 0x3008, 0x8d6: 0x3008, 0x8d7: 0x0040,
+ 0x8d8: 0x0040, 0x8d9: 0x0040, 0x8da: 0x0040, 0x8db: 0x0040, 0x8dc: 0x0040, 0x8dd: 0x0008,
+ 0x8de: 0x0008, 0x8df: 0x0040, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0040, 0x8f1: 0x0008, 0x8f2: 0x0008, 0x8f3: 0x3008, 0x8f4: 0x0040, 0x8f5: 0x0040,
+ 0x8f6: 0x0040, 0x8f7: 0x0040, 0x8f8: 0x0040, 0x8f9: 0x0040, 0x8fa: 0x0040, 0x8fb: 0x0040,
+ 0x8fc: 0x0040, 0x8fd: 0x0040, 0x8fe: 0x0040, 0x8ff: 0x0040,
+ // Block 0x24, offset 0x900
+ 0x900: 0x3008, 0x901: 0x3308, 0x902: 0x3308, 0x903: 0x3308, 0x904: 0x3308, 0x905: 0x0040,
+ 0x906: 0x3008, 0x907: 0x3008, 0x908: 0x3008, 0x909: 0x0040, 0x90a: 0x3008, 0x90b: 0x3008,
+ 0x90c: 0x3008, 0x90d: 0x3b08, 0x90e: 0x0008, 0x90f: 0x0018, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x3008,
+ 0x918: 0x0018, 0x919: 0x0018, 0x91a: 0x0018, 0x91b: 0x0018, 0x91c: 0x0018, 0x91d: 0x0018,
+ 0x91e: 0x0018, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x3308, 0x923: 0x3308,
+ 0x924: 0x0040, 0x925: 0x0040, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0018, 0x931: 0x0018, 0x932: 0x0018, 0x933: 0x0018, 0x934: 0x0018, 0x935: 0x0018,
+ 0x936: 0x0018, 0x937: 0x0018, 0x938: 0x0018, 0x939: 0x0018, 0x93a: 0x0008, 0x93b: 0x0008,
+ 0x93c: 0x0008, 0x93d: 0x0008, 0x93e: 0x0008, 0x93f: 0x0008,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0040, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0040, 0x944: 0x0008, 0x945: 0x0040,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0040,
+ 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0040, 0x965: 0x0008, 0x966: 0x0040, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0008, 0x96e: 0x0008, 0x96f: 0x0008,
+ 0x970: 0x0008, 0x971: 0x3308, 0x972: 0x0008, 0x973: 0x01f9, 0x974: 0x3308, 0x975: 0x3308,
+ 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x3308, 0x97a: 0x3b08, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x0008, 0x97e: 0x0040, 0x97f: 0x0040,
+ // Block 0x26, offset 0x980
+ 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0211, 0x984: 0x0008, 0x985: 0x0008,
+ 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0040, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x0219, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008,
+ 0x992: 0x0221, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0229,
+ 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0231, 0x99d: 0x0008,
+ 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008,
+ 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0239,
+ 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0040, 0x9ae: 0x0040, 0x9af: 0x0040,
+ 0x9b0: 0x0040, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x0241, 0x9b4: 0x3308, 0x9b5: 0x0249,
+ 0x9b6: 0x0251, 0x9b7: 0x0259, 0x9b8: 0x0261, 0x9b9: 0x0269, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x3308, 0x9be: 0x3308, 0x9bf: 0x3008,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x3308, 0x9c1: 0x0271, 0x9c2: 0x3308, 0x9c3: 0x3308, 0x9c4: 0x3b08, 0x9c5: 0x0018,
+ 0x9c6: 0x3308, 0x9c7: 0x3308, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x3308, 0x9ce: 0x3308, 0x9cf: 0x3308, 0x9d0: 0x3308, 0x9d1: 0x3308,
+ 0x9d2: 0x3308, 0x9d3: 0x0279, 0x9d4: 0x3308, 0x9d5: 0x3308, 0x9d6: 0x3308, 0x9d7: 0x3308,
+ 0x9d8: 0x0040, 0x9d9: 0x3308, 0x9da: 0x3308, 0x9db: 0x3308, 0x9dc: 0x3308, 0x9dd: 0x0281,
+ 0x9de: 0x3308, 0x9df: 0x3308, 0x9e0: 0x3308, 0x9e1: 0x3308, 0x9e2: 0x0289, 0x9e3: 0x3308,
+ 0x9e4: 0x3308, 0x9e5: 0x3308, 0x9e6: 0x3308, 0x9e7: 0x0291, 0x9e8: 0x3308, 0x9e9: 0x3308,
+ 0x9ea: 0x3308, 0x9eb: 0x3308, 0x9ec: 0x0299, 0x9ed: 0x3308, 0x9ee: 0x3308, 0x9ef: 0x3308,
+ 0x9f0: 0x3308, 0x9f1: 0x3308, 0x9f2: 0x3308, 0x9f3: 0x3308, 0x9f4: 0x3308, 0x9f5: 0x3308,
+ 0x9f6: 0x3308, 0x9f7: 0x3308, 0x9f8: 0x3308, 0x9f9: 0x02a1, 0x9fa: 0x3308, 0x9fb: 0x3308,
+ 0x9fc: 0x3308, 0x9fd: 0x0040, 0x9fe: 0x0018, 0x9ff: 0x0018,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008,
+ 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008,
+ 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008,
+ 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008,
+ 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x0008, 0xa1c: 0x0008, 0xa1d: 0x0008,
+ 0xa1e: 0x0008, 0xa1f: 0x0008, 0xa20: 0x0008, 0xa21: 0x0008, 0xa22: 0x0008, 0xa23: 0x0008,
+ 0xa24: 0x0008, 0xa25: 0x0008, 0xa26: 0x0008, 0xa27: 0x0008, 0xa28: 0x0008, 0xa29: 0x0008,
+ 0xa2a: 0x0008, 0xa2b: 0x0008, 0xa2c: 0x0019, 0xa2d: 0x02e1, 0xa2e: 0x02e9, 0xa2f: 0x0008,
+ 0xa30: 0x02f1, 0xa31: 0x02f9, 0xa32: 0x0301, 0xa33: 0x0309, 0xa34: 0x00a9, 0xa35: 0x0311,
+ 0xa36: 0x00b1, 0xa37: 0x0319, 0xa38: 0x0101, 0xa39: 0x0321, 0xa3a: 0x0329, 0xa3b: 0x0008,
+ 0xa3c: 0x0051, 0xa3d: 0x0331, 0xa3e: 0x0339, 0xa3f: 0x00b9,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0341, 0xa41: 0x0349, 0xa42: 0x00c1, 0xa43: 0x0019, 0xa44: 0x0351, 0xa45: 0x0359,
+ 0xa46: 0x05b5, 0xa47: 0x02e9, 0xa48: 0x02f1, 0xa49: 0x02f9, 0xa4a: 0x0361, 0xa4b: 0x0369,
+ 0xa4c: 0x0371, 0xa4d: 0x0309, 0xa4e: 0x0008, 0xa4f: 0x0319, 0xa50: 0x0321, 0xa51: 0x0379,
+ 0xa52: 0x0051, 0xa53: 0x0381, 0xa54: 0x05cd, 0xa55: 0x05cd, 0xa56: 0x0339, 0xa57: 0x0341,
+ 0xa58: 0x0349, 0xa59: 0x05b5, 0xa5a: 0x0389, 0xa5b: 0x0391, 0xa5c: 0x05e5, 0xa5d: 0x0399,
+ 0xa5e: 0x03a1, 0xa5f: 0x03a9, 0xa60: 0x03b1, 0xa61: 0x03b9, 0xa62: 0x0311, 0xa63: 0x00b9,
+ 0xa64: 0x0349, 0xa65: 0x0391, 0xa66: 0x0399, 0xa67: 0x03a1, 0xa68: 0x03c1, 0xa69: 0x03b1,
+ 0xa6a: 0x03b9, 0xa6b: 0x0008, 0xa6c: 0x0008, 0xa6d: 0x0008, 0xa6e: 0x0008, 0xa6f: 0x0008,
+ 0xa70: 0x0008, 0xa71: 0x0008, 0xa72: 0x0008, 0xa73: 0x0008, 0xa74: 0x0008, 0xa75: 0x0008,
+ 0xa76: 0x0008, 0xa77: 0x0008, 0xa78: 0x03c9, 0xa79: 0x0008, 0xa7a: 0x0008, 0xa7b: 0x0008,
+ 0xa7c: 0x0008, 0xa7d: 0x0008, 0xa7e: 0x0008, 0xa7f: 0x0008,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008,
+ 0xa86: 0x0008, 0xa87: 0x0008, 0xa88: 0x0008, 0xa89: 0x0008, 0xa8a: 0x0008, 0xa8b: 0x0008,
+ 0xa8c: 0x0008, 0xa8d: 0x0008, 0xa8e: 0x0008, 0xa8f: 0x0008, 0xa90: 0x0008, 0xa91: 0x0008,
+ 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008,
+ 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0008, 0xa9b: 0x03d1, 0xa9c: 0x03d9, 0xa9d: 0x03e1,
+ 0xa9e: 0x03e9, 0xa9f: 0x0371, 0xaa0: 0x03f1, 0xaa1: 0x03f9, 0xaa2: 0x0401, 0xaa3: 0x0409,
+ 0xaa4: 0x0411, 0xaa5: 0x0419, 0xaa6: 0x0421, 0xaa7: 0x05fd, 0xaa8: 0x0429, 0xaa9: 0x0431,
+ 0xaaa: 0xe17d, 0xaab: 0x0439, 0xaac: 0x0441, 0xaad: 0x0449, 0xaae: 0x0451, 0xaaf: 0x0459,
+ 0xab0: 0x0461, 0xab1: 0x0469, 0xab2: 0x0471, 0xab3: 0x0479, 0xab4: 0x0481, 0xab5: 0x0489,
+ 0xab6: 0x0491, 0xab7: 0x0499, 0xab8: 0x0615, 0xab9: 0x04a1, 0xaba: 0x04a9, 0xabb: 0x04b1,
+ 0xabc: 0x04b9, 0xabd: 0x04c1, 0xabe: 0x04c9, 0xabf: 0x04d1,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0xe00d, 0xad7: 0x0008,
+ 0xad8: 0xe00d, 0xad9: 0x0008, 0xada: 0xe00d, 0xadb: 0x0008, 0xadc: 0xe00d, 0xadd: 0x0008,
+ 0xade: 0xe00d, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0xe00d, 0xb01: 0x0008, 0xb02: 0xe00d, 0xb03: 0x0008, 0xb04: 0xe00d, 0xb05: 0x0008,
+ 0xb06: 0xe00d, 0xb07: 0x0008, 0xb08: 0xe00d, 0xb09: 0x0008, 0xb0a: 0xe00d, 0xb0b: 0x0008,
+ 0xb0c: 0xe00d, 0xb0d: 0x0008, 0xb0e: 0xe00d, 0xb0f: 0x0008, 0xb10: 0xe00d, 0xb11: 0x0008,
+ 0xb12: 0xe00d, 0xb13: 0x0008, 0xb14: 0xe00d, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0008, 0xb19: 0x0008, 0xb1a: 0x062d, 0xb1b: 0x064d, 0xb1c: 0x0008, 0xb1d: 0x0008,
+ 0xb1e: 0x04d9, 0xb1f: 0x0008, 0xb20: 0xe00d, 0xb21: 0x0008, 0xb22: 0xe00d, 0xb23: 0x0008,
+ 0xb24: 0xe00d, 0xb25: 0x0008, 0xb26: 0xe00d, 0xb27: 0x0008, 0xb28: 0xe00d, 0xb29: 0x0008,
+ 0xb2a: 0xe00d, 0xb2b: 0x0008, 0xb2c: 0xe00d, 0xb2d: 0x0008, 0xb2e: 0xe00d, 0xb2f: 0x0008,
+ 0xb30: 0xe00d, 0xb31: 0x0008, 0xb32: 0xe00d, 0xb33: 0x0008, 0xb34: 0xe00d, 0xb35: 0x0008,
+ 0xb36: 0xe00d, 0xb37: 0x0008, 0xb38: 0xe00d, 0xb39: 0x0008, 0xb3a: 0xe00d, 0xb3b: 0x0008,
+ 0xb3c: 0xe00d, 0xb3d: 0x0008, 0xb3e: 0xe00d, 0xb3f: 0x0008,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x0008, 0xb41: 0x0008, 0xb42: 0x0008, 0xb43: 0x0008, 0xb44: 0x0008, 0xb45: 0x0008,
+ 0xb46: 0x0040, 0xb47: 0x0040, 0xb48: 0xe045, 0xb49: 0xe045, 0xb4a: 0xe045, 0xb4b: 0xe045,
+ 0xb4c: 0xe045, 0xb4d: 0xe045, 0xb4e: 0x0040, 0xb4f: 0x0040, 0xb50: 0x0008, 0xb51: 0x0008,
+ 0xb52: 0x0008, 0xb53: 0x0008, 0xb54: 0x0008, 0xb55: 0x0008, 0xb56: 0x0008, 0xb57: 0x0008,
+ 0xb58: 0x0040, 0xb59: 0xe045, 0xb5a: 0x0040, 0xb5b: 0xe045, 0xb5c: 0x0040, 0xb5d: 0xe045,
+ 0xb5e: 0x0040, 0xb5f: 0xe045, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x0008,
+ 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045,
+ 0xb6a: 0xe045, 0xb6b: 0xe045, 0xb6c: 0xe045, 0xb6d: 0xe045, 0xb6e: 0xe045, 0xb6f: 0xe045,
+ 0xb70: 0x0008, 0xb71: 0x04e1, 0xb72: 0x0008, 0xb73: 0x04e9, 0xb74: 0x0008, 0xb75: 0x04f1,
+ 0xb76: 0x0008, 0xb77: 0x04f9, 0xb78: 0x0008, 0xb79: 0x0501, 0xb7a: 0x0008, 0xb7b: 0x0509,
+ 0xb7c: 0x0008, 0xb7d: 0x0511, 0xb7e: 0x0040, 0xb7f: 0x0040,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x0519, 0xb81: 0x0521, 0xb82: 0x0529, 0xb83: 0x0531, 0xb84: 0x0539, 0xb85: 0x0541,
+ 0xb86: 0x0549, 0xb87: 0x0551, 0xb88: 0x0519, 0xb89: 0x0521, 0xb8a: 0x0529, 0xb8b: 0x0531,
+ 0xb8c: 0x0539, 0xb8d: 0x0541, 0xb8e: 0x0549, 0xb8f: 0x0551, 0xb90: 0x0559, 0xb91: 0x0561,
+ 0xb92: 0x0569, 0xb93: 0x0571, 0xb94: 0x0579, 0xb95: 0x0581, 0xb96: 0x0589, 0xb97: 0x0591,
+ 0xb98: 0x0559, 0xb99: 0x0561, 0xb9a: 0x0569, 0xb9b: 0x0571, 0xb9c: 0x0579, 0xb9d: 0x0581,
+ 0xb9e: 0x0589, 0xb9f: 0x0591, 0xba0: 0x0599, 0xba1: 0x05a1, 0xba2: 0x05a9, 0xba3: 0x05b1,
+ 0xba4: 0x05b9, 0xba5: 0x05c1, 0xba6: 0x05c9, 0xba7: 0x05d1, 0xba8: 0x0599, 0xba9: 0x05a1,
+ 0xbaa: 0x05a9, 0xbab: 0x05b1, 0xbac: 0x05b9, 0xbad: 0x05c1, 0xbae: 0x05c9, 0xbaf: 0x05d1,
+ 0xbb0: 0x0008, 0xbb1: 0x0008, 0xbb2: 0x05d9, 0xbb3: 0x05e1, 0xbb4: 0x05e9, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x05f1, 0xbb8: 0xe045, 0xbb9: 0xe045, 0xbba: 0x0665, 0xbbb: 0x04e1,
+ 0xbbc: 0x05e1, 0xbbd: 0x067e, 0xbbe: 0x05f9, 0xbbf: 0x069e,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x06be, 0xbc1: 0x0602, 0xbc2: 0x0609, 0xbc3: 0x0611, 0xbc4: 0x0619, 0xbc5: 0x0040,
+ 0xbc6: 0x0008, 0xbc7: 0x0621, 0xbc8: 0x06dd, 0xbc9: 0x04e9, 0xbca: 0x06f5, 0xbcb: 0x04f1,
+ 0xbcc: 0x0611, 0xbcd: 0x062a, 0xbce: 0x0632, 0xbcf: 0x063a, 0xbd0: 0x0008, 0xbd1: 0x0008,
+ 0xbd2: 0x0008, 0xbd3: 0x0641, 0xbd4: 0x0040, 0xbd5: 0x0040, 0xbd6: 0x0008, 0xbd7: 0x0008,
+ 0xbd8: 0xe045, 0xbd9: 0xe045, 0xbda: 0x070d, 0xbdb: 0x04f9, 0xbdc: 0x0040, 0xbdd: 0x064a,
+ 0xbde: 0x0652, 0xbdf: 0x065a, 0xbe0: 0x0008, 0xbe1: 0x0008, 0xbe2: 0x0008, 0xbe3: 0x0661,
+ 0xbe4: 0x0008, 0xbe5: 0x0008, 0xbe6: 0x0008, 0xbe7: 0x0008, 0xbe8: 0xe045, 0xbe9: 0xe045,
+ 0xbea: 0x0725, 0xbeb: 0x0509, 0xbec: 0xe04d, 0xbed: 0x066a, 0xbee: 0x012a, 0xbef: 0x0672,
+ 0xbf0: 0x0040, 0xbf1: 0x0040, 0xbf2: 0x0679, 0xbf3: 0x0681, 0xbf4: 0x0689, 0xbf5: 0x0040,
+ 0xbf6: 0x0008, 0xbf7: 0x0691, 0xbf8: 0x073d, 0xbf9: 0x0501, 0xbfa: 0x0515, 0xbfb: 0x0511,
+ 0xbfc: 0x0681, 0xbfd: 0x0756, 0xbfe: 0x0776, 0xbff: 0x0040,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x000a, 0xc01: 0x000a, 0xc02: 0x000a, 0xc03: 0x000a, 0xc04: 0x000a, 0xc05: 0x000a,
+ 0xc06: 0x000a, 0xc07: 0x000a, 0xc08: 0x000a, 0xc09: 0x000a, 0xc0a: 0x000a, 0xc0b: 0x03c0,
+ 0xc0c: 0x0003, 0xc0d: 0x0003, 0xc0e: 0x0340, 0xc0f: 0x0b40, 0xc10: 0x0018, 0xc11: 0xe00d,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x0796,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018,
+ 0xc24: 0x0040, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0018, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x000a,
+ 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0699, 0xc34: 0x06a1, 0xc35: 0x0018,
+ 0xc36: 0x06a9, 0xc37: 0x06b1, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018,
+ 0xc3c: 0x06ba, 0xc3d: 0x0018, 0xc3e: 0x07b6, 0xc3f: 0x0018,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x0018, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0018,
+ 0xc46: 0x0018, 0xc47: 0x06c2, 0xc48: 0x06ca, 0xc49: 0x06d2, 0xc4a: 0x0018, 0xc4b: 0x0018,
+ 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0018, 0xc4f: 0x0018, 0xc50: 0x0018, 0xc51: 0x0018,
+ 0xc52: 0x0018, 0xc53: 0x0018, 0xc54: 0x0018, 0xc55: 0x0018, 0xc56: 0x0018, 0xc57: 0x06d9,
+ 0xc58: 0x0018, 0xc59: 0x0018, 0xc5a: 0x0018, 0xc5b: 0x0018, 0xc5c: 0x0018, 0xc5d: 0x0018,
+ 0xc5e: 0x0018, 0xc5f: 0x000a, 0xc60: 0x03c0, 0xc61: 0x0340, 0xc62: 0x0340, 0xc63: 0x0340,
+ 0xc64: 0x03c0, 0xc65: 0x0040, 0xc66: 0x0040, 0xc67: 0x0040, 0xc68: 0x0040, 0xc69: 0x0040,
+ 0xc6a: 0x0340, 0xc6b: 0x0340, 0xc6c: 0x0340, 0xc6d: 0x0340, 0xc6e: 0x0340, 0xc6f: 0x0340,
+ 0xc70: 0x06e1, 0xc71: 0x0311, 0xc72: 0x0040, 0xc73: 0x0040, 0xc74: 0x06e9, 0xc75: 0x06f1,
+ 0xc76: 0x06f9, 0xc77: 0x0701, 0xc78: 0x0709, 0xc79: 0x0711, 0xc7a: 0x071a, 0xc7b: 0x07d5,
+ 0xc7c: 0x0722, 0xc7d: 0x072a, 0xc7e: 0x0732, 0xc7f: 0x0329,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x06e1, 0xc81: 0x0049, 0xc82: 0x0029, 0xc83: 0x0031, 0xc84: 0x06e9, 0xc85: 0x06f1,
+ 0xc86: 0x06f9, 0xc87: 0x0701, 0xc88: 0x0709, 0xc89: 0x0711, 0xc8a: 0x071a, 0xc8b: 0x07ed,
+ 0xc8c: 0x0722, 0xc8d: 0x072a, 0xc8e: 0x0732, 0xc8f: 0x0040, 0xc90: 0x0019, 0xc91: 0x02f9,
+ 0xc92: 0x0051, 0xc93: 0x0109, 0xc94: 0x0361, 0xc95: 0x00a9, 0xc96: 0x0319, 0xc97: 0x0101,
+ 0xc98: 0x0321, 0xc99: 0x0329, 0xc9a: 0x0339, 0xc9b: 0x0089, 0xc9c: 0x0341, 0xc9d: 0x0040,
+ 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x0018, 0xca1: 0x0018, 0xca2: 0x0018, 0xca3: 0x0018,
+ 0xca4: 0x0018, 0xca5: 0x0018, 0xca6: 0x0018, 0xca7: 0x0018, 0xca8: 0x0739, 0xca9: 0x0018,
+ 0xcaa: 0x0018, 0xcab: 0x0018, 0xcac: 0x0018, 0xcad: 0x0018, 0xcae: 0x0018, 0xcaf: 0x0018,
+ 0xcb0: 0x0018, 0xcb1: 0x0018, 0xcb2: 0x0018, 0xcb3: 0x0018, 0xcb4: 0x0018, 0xcb5: 0x0018,
+ 0xcb6: 0x0018, 0xcb7: 0x0018, 0xcb8: 0x0018, 0xcb9: 0x0018, 0xcba: 0x0018, 0xcbb: 0x0018,
+ 0xcbc: 0x0018, 0xcbd: 0x0018, 0xcbe: 0x0018, 0xcbf: 0x0018,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0806, 0xcc1: 0x0826, 0xcc2: 0x03d9, 0xcc3: 0x0845, 0xcc4: 0x0018, 0xcc5: 0x0866,
+ 0xcc6: 0x0886, 0xcc7: 0x0369, 0xcc8: 0x0018, 0xcc9: 0x08a5, 0xcca: 0x0309, 0xccb: 0x00a9,
+ 0xccc: 0x00a9, 0xccd: 0x00a9, 0xcce: 0x00a9, 0xccf: 0x0741, 0xcd0: 0x0311, 0xcd1: 0x0311,
+ 0xcd2: 0x0101, 0xcd3: 0x0101, 0xcd4: 0x0018, 0xcd5: 0x0329, 0xcd6: 0x0749, 0xcd7: 0x0018,
+ 0xcd8: 0x0018, 0xcd9: 0x0339, 0xcda: 0x0751, 0xcdb: 0x00b9, 0xcdc: 0x00b9, 0xcdd: 0x00b9,
+ 0xcde: 0x0018, 0xcdf: 0x0018, 0xce0: 0x0759, 0xce1: 0x08c5, 0xce2: 0x0761, 0xce3: 0x0018,
+ 0xce4: 0x04b1, 0xce5: 0x0018, 0xce6: 0x0769, 0xce7: 0x0018, 0xce8: 0x04b1, 0xce9: 0x0018,
+ 0xcea: 0x0319, 0xceb: 0x0771, 0xcec: 0x02e9, 0xced: 0x03d9, 0xcee: 0x0018, 0xcef: 0x02f9,
+ 0xcf0: 0x02f9, 0xcf1: 0x03f1, 0xcf2: 0x0040, 0xcf3: 0x0321, 0xcf4: 0x0051, 0xcf5: 0x0779,
+ 0xcf6: 0x0781, 0xcf7: 0x0789, 0xcf8: 0x0791, 0xcf9: 0x0311, 0xcfa: 0x0018, 0xcfb: 0x08e5,
+ 0xcfc: 0x0799, 0xcfd: 0x03a1, 0xcfe: 0x03a1, 0xcff: 0x0799,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0905, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x02f1,
+ 0xd06: 0x02f1, 0xd07: 0x02f9, 0xd08: 0x0311, 0xd09: 0x00b1, 0xd0a: 0x0018, 0xd0b: 0x0018,
+ 0xd0c: 0x0018, 0xd0d: 0x0018, 0xd0e: 0x0008, 0xd0f: 0x0018, 0xd10: 0x07a1, 0xd11: 0x07a9,
+ 0xd12: 0x07b1, 0xd13: 0x07b9, 0xd14: 0x07c1, 0xd15: 0x07c9, 0xd16: 0x07d1, 0xd17: 0x07d9,
+ 0xd18: 0x07e1, 0xd19: 0x07e9, 0xd1a: 0x07f1, 0xd1b: 0x07f9, 0xd1c: 0x0801, 0xd1d: 0x0809,
+ 0xd1e: 0x0811, 0xd1f: 0x0819, 0xd20: 0x0311, 0xd21: 0x0821, 0xd22: 0x091d, 0xd23: 0x0829,
+ 0xd24: 0x0391, 0xd25: 0x0831, 0xd26: 0x093d, 0xd27: 0x0839, 0xd28: 0x0841, 0xd29: 0x0109,
+ 0xd2a: 0x0849, 0xd2b: 0x095d, 0xd2c: 0x0101, 0xd2d: 0x03d9, 0xd2e: 0x02f1, 0xd2f: 0x0321,
+ 0xd30: 0x0311, 0xd31: 0x0821, 0xd32: 0x097d, 0xd33: 0x0829, 0xd34: 0x0391, 0xd35: 0x0831,
+ 0xd36: 0x099d, 0xd37: 0x0839, 0xd38: 0x0841, 0xd39: 0x0109, 0xd3a: 0x0849, 0xd3b: 0x09bd,
+ 0xd3c: 0x0101, 0xd3d: 0x03d9, 0xd3e: 0x02f1, 0xd3f: 0x0321,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x0018, 0xd41: 0x0018, 0xd42: 0x0018, 0xd43: 0x0018, 0xd44: 0x0018, 0xd45: 0x0018,
+ 0xd46: 0x0018, 0xd47: 0x0018, 0xd48: 0x0018, 0xd49: 0x0018, 0xd4a: 0x0018, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0040, 0xd5d: 0x0040,
+ 0xd5e: 0x0040, 0xd5f: 0x0040, 0xd60: 0x0049, 0xd61: 0x0029, 0xd62: 0x0031, 0xd63: 0x06e9,
+ 0xd64: 0x06f1, 0xd65: 0x06f9, 0xd66: 0x0701, 0xd67: 0x0709, 0xd68: 0x0711, 0xd69: 0x0879,
+ 0xd6a: 0x0881, 0xd6b: 0x0889, 0xd6c: 0x0891, 0xd6d: 0x0899, 0xd6e: 0x08a1, 0xd6f: 0x08a9,
+ 0xd70: 0x08b1, 0xd71: 0x08b9, 0xd72: 0x08c1, 0xd73: 0x08c9, 0xd74: 0x0a1e, 0xd75: 0x0a3e,
+ 0xd76: 0x0a5e, 0xd77: 0x0a7e, 0xd78: 0x0a9e, 0xd79: 0x0abe, 0xd7a: 0x0ade, 0xd7b: 0x0afe,
+ 0xd7c: 0x0b1e, 0xd7d: 0x08d2, 0xd7e: 0x08da, 0xd7f: 0x08e2,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x08ea, 0xd81: 0x08f2, 0xd82: 0x08fa, 0xd83: 0x0902, 0xd84: 0x090a, 0xd85: 0x0912,
+ 0xd86: 0x091a, 0xd87: 0x0922, 0xd88: 0x0040, 0xd89: 0x0040, 0xd8a: 0x0040, 0xd8b: 0x0040,
+ 0xd8c: 0x0040, 0xd8d: 0x0040, 0xd8e: 0x0040, 0xd8f: 0x0040, 0xd90: 0x0040, 0xd91: 0x0040,
+ 0xd92: 0x0040, 0xd93: 0x0040, 0xd94: 0x0040, 0xd95: 0x0040, 0xd96: 0x0040, 0xd97: 0x0040,
+ 0xd98: 0x0040, 0xd99: 0x0040, 0xd9a: 0x0040, 0xd9b: 0x0040, 0xd9c: 0x0b3e, 0xd9d: 0x0b5e,
+ 0xd9e: 0x0b7e, 0xd9f: 0x0b9e, 0xda0: 0x0bbe, 0xda1: 0x0bde, 0xda2: 0x0bfe, 0xda3: 0x0c1e,
+ 0xda4: 0x0c3e, 0xda5: 0x0c5e, 0xda6: 0x0c7e, 0xda7: 0x0c9e, 0xda8: 0x0cbe, 0xda9: 0x0cde,
+ 0xdaa: 0x0cfe, 0xdab: 0x0d1e, 0xdac: 0x0d3e, 0xdad: 0x0d5e, 0xdae: 0x0d7e, 0xdaf: 0x0d9e,
+ 0xdb0: 0x0dbe, 0xdb1: 0x0dde, 0xdb2: 0x0dfe, 0xdb3: 0x0e1e, 0xdb4: 0x0e3e, 0xdb5: 0x0e5e,
+ 0xdb6: 0x0019, 0xdb7: 0x02e9, 0xdb8: 0x03d9, 0xdb9: 0x02f1, 0xdba: 0x02f9, 0xdbb: 0x03f1,
+ 0xdbc: 0x0309, 0xdbd: 0x00a9, 0xdbe: 0x0311, 0xdbf: 0x00b1,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0319, 0xdc1: 0x0101, 0xdc2: 0x0321, 0xdc3: 0x0329, 0xdc4: 0x0051, 0xdc5: 0x0339,
+ 0xdc6: 0x0751, 0xdc7: 0x00b9, 0xdc8: 0x0089, 0xdc9: 0x0341, 0xdca: 0x0349, 0xdcb: 0x0391,
+ 0xdcc: 0x00c1, 0xdcd: 0x0109, 0xdce: 0x00c9, 0xdcf: 0x04b1, 0xdd0: 0x0019, 0xdd1: 0x02e9,
+ 0xdd2: 0x03d9, 0xdd3: 0x02f1, 0xdd4: 0x02f9, 0xdd5: 0x03f1, 0xdd6: 0x0309, 0xdd7: 0x00a9,
+ 0xdd8: 0x0311, 0xdd9: 0x00b1, 0xdda: 0x0319, 0xddb: 0x0101, 0xddc: 0x0321, 0xddd: 0x0329,
+ 0xdde: 0x0051, 0xddf: 0x0339, 0xde0: 0x0751, 0xde1: 0x00b9, 0xde2: 0x0089, 0xde3: 0x0341,
+ 0xde4: 0x0349, 0xde5: 0x0391, 0xde6: 0x00c1, 0xde7: 0x0109, 0xde8: 0x00c9, 0xde9: 0x04b1,
+ 0xdea: 0x06e1, 0xdeb: 0x0018, 0xdec: 0x0018, 0xded: 0x0018, 0xdee: 0x0018, 0xdef: 0x0018,
+ 0xdf0: 0x0018, 0xdf1: 0x0018, 0xdf2: 0x0018, 0xdf3: 0x0018, 0xdf4: 0x0018, 0xdf5: 0x0018,
+ 0xdf6: 0x0018, 0xdf7: 0x0018, 0xdf8: 0x0018, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018,
+ 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x0008, 0xe01: 0x0008, 0xe02: 0x0008, 0xe03: 0x0008, 0xe04: 0x0008, 0xe05: 0x0008,
+ 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0008, 0xe09: 0x0008, 0xe0a: 0x0008, 0xe0b: 0x0008,
+ 0xe0c: 0x0008, 0xe0d: 0x0008, 0xe0e: 0x0008, 0xe0f: 0x0008, 0xe10: 0x0008, 0xe11: 0x0008,
+ 0xe12: 0x0008, 0xe13: 0x0008, 0xe14: 0x0008, 0xe15: 0x0008, 0xe16: 0x0008, 0xe17: 0x0008,
+ 0xe18: 0x0008, 0xe19: 0x0008, 0xe1a: 0x0008, 0xe1b: 0x0008, 0xe1c: 0x0008, 0xe1d: 0x0008,
+ 0xe1e: 0x0008, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0x0941, 0xe23: 0x0ed5,
+ 0xe24: 0x0949, 0xe25: 0x0008, 0xe26: 0x0008, 0xe27: 0xe07d, 0xe28: 0x0008, 0xe29: 0xe01d,
+ 0xe2a: 0x0008, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0x0359, 0xe2e: 0x0441, 0xe2f: 0x0351,
+ 0xe30: 0x03d1, 0xe31: 0x0008, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0008, 0xe35: 0xe01d,
+ 0xe36: 0x0008, 0xe37: 0x0008, 0xe38: 0x0008, 0xe39: 0x0008, 0xe3a: 0x0008, 0xe3b: 0x0008,
+ 0xe3c: 0x00b1, 0xe3d: 0x0391, 0xe3e: 0x0951, 0xe3f: 0x0959,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0xe00d, 0xe41: 0x0008, 0xe42: 0xe00d, 0xe43: 0x0008, 0xe44: 0xe00d, 0xe45: 0x0008,
+ 0xe46: 0xe00d, 0xe47: 0x0008, 0xe48: 0xe00d, 0xe49: 0x0008, 0xe4a: 0xe00d, 0xe4b: 0x0008,
+ 0xe4c: 0xe00d, 0xe4d: 0x0008, 0xe4e: 0xe00d, 0xe4f: 0x0008, 0xe50: 0xe00d, 0xe51: 0x0008,
+ 0xe52: 0xe00d, 0xe53: 0x0008, 0xe54: 0xe00d, 0xe55: 0x0008, 0xe56: 0xe00d, 0xe57: 0x0008,
+ 0xe58: 0xe00d, 0xe59: 0x0008, 0xe5a: 0xe00d, 0xe5b: 0x0008, 0xe5c: 0xe00d, 0xe5d: 0x0008,
+ 0xe5e: 0xe00d, 0xe5f: 0x0008, 0xe60: 0xe00d, 0xe61: 0x0008, 0xe62: 0xe00d, 0xe63: 0x0008,
+ 0xe64: 0x0008, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018,
+ 0xe6a: 0x0018, 0xe6b: 0xe03d, 0xe6c: 0x0008, 0xe6d: 0xe01d, 0xe6e: 0x0008, 0xe6f: 0x3308,
+ 0xe70: 0x3308, 0xe71: 0x3308, 0xe72: 0xe00d, 0xe73: 0x0008, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0018, 0xe7a: 0x0018, 0xe7b: 0x0018,
+ 0xe7c: 0x0018, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x2715, 0xe81: 0x2735, 0xe82: 0x2755, 0xe83: 0x2775, 0xe84: 0x2795, 0xe85: 0x27b5,
+ 0xe86: 0x27d5, 0xe87: 0x27f5, 0xe88: 0x2815, 0xe89: 0x2835, 0xe8a: 0x2855, 0xe8b: 0x2875,
+ 0xe8c: 0x2895, 0xe8d: 0x28b5, 0xe8e: 0x28d5, 0xe8f: 0x28f5, 0xe90: 0x2915, 0xe91: 0x2935,
+ 0xe92: 0x2955, 0xe93: 0x2975, 0xe94: 0x2995, 0xe95: 0x29b5, 0xe96: 0x0040, 0xe97: 0x0040,
+ 0xe98: 0x0040, 0xe99: 0x0040, 0xe9a: 0x0040, 0xe9b: 0x0040, 0xe9c: 0x0040, 0xe9d: 0x0040,
+ 0xe9e: 0x0040, 0xe9f: 0x0040, 0xea0: 0x0040, 0xea1: 0x0040, 0xea2: 0x0040, 0xea3: 0x0040,
+ 0xea4: 0x0040, 0xea5: 0x0040, 0xea6: 0x0040, 0xea7: 0x0040, 0xea8: 0x0040, 0xea9: 0x0040,
+ 0xeaa: 0x0040, 0xeab: 0x0040, 0xeac: 0x0040, 0xead: 0x0040, 0xeae: 0x0040, 0xeaf: 0x0040,
+ 0xeb0: 0x0040, 0xeb1: 0x0040, 0xeb2: 0x0040, 0xeb3: 0x0040, 0xeb4: 0x0040, 0xeb5: 0x0040,
+ 0xeb6: 0x0040, 0xeb7: 0x0040, 0xeb8: 0x0040, 0xeb9: 0x0040, 0xeba: 0x0040, 0xebb: 0x0040,
+ 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x000a, 0xec1: 0x0018, 0xec2: 0x0961, 0xec3: 0x0018, 0xec4: 0x0018, 0xec5: 0x0008,
+ 0xec6: 0x0008, 0xec7: 0x0008, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018,
+ 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x0018, 0xed1: 0x0018,
+ 0xed2: 0x0018, 0xed3: 0x0018, 0xed4: 0x0018, 0xed5: 0x0018, 0xed6: 0x0018, 0xed7: 0x0018,
+ 0xed8: 0x0018, 0xed9: 0x0018, 0xeda: 0x0018, 0xedb: 0x0018, 0xedc: 0x0018, 0xedd: 0x0018,
+ 0xede: 0x0018, 0xedf: 0x0018, 0xee0: 0x0018, 0xee1: 0x0018, 0xee2: 0x0018, 0xee3: 0x0018,
+ 0xee4: 0x0018, 0xee5: 0x0018, 0xee6: 0x0018, 0xee7: 0x0018, 0xee8: 0x0018, 0xee9: 0x0018,
+ 0xeea: 0x3308, 0xeeb: 0x3308, 0xeec: 0x3308, 0xeed: 0x3308, 0xeee: 0x3018, 0xeef: 0x3018,
+ 0xef0: 0x0018, 0xef1: 0x0018, 0xef2: 0x0018, 0xef3: 0x0018, 0xef4: 0x0018, 0xef5: 0x0018,
+ 0xef6: 0xe125, 0xef7: 0x0018, 0xef8: 0x29d5, 0xef9: 0x29f5, 0xefa: 0x2a15, 0xefb: 0x0018,
+ 0xefc: 0x0008, 0xefd: 0x0018, 0xefe: 0x0018, 0xeff: 0x0018,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2b55, 0xf01: 0x2b75, 0xf02: 0x2b95, 0xf03: 0x2bb5, 0xf04: 0x2bd5, 0xf05: 0x2bf5,
+ 0xf06: 0x2bf5, 0xf07: 0x2bf5, 0xf08: 0x2c15, 0xf09: 0x2c15, 0xf0a: 0x2c15, 0xf0b: 0x2c15,
+ 0xf0c: 0x2c35, 0xf0d: 0x2c35, 0xf0e: 0x2c35, 0xf0f: 0x2c55, 0xf10: 0x2c75, 0xf11: 0x2c75,
+ 0xf12: 0x2a95, 0xf13: 0x2a95, 0xf14: 0x2c75, 0xf15: 0x2c75, 0xf16: 0x2c95, 0xf17: 0x2c95,
+ 0xf18: 0x2c75, 0xf19: 0x2c75, 0xf1a: 0x2a95, 0xf1b: 0x2a95, 0xf1c: 0x2c75, 0xf1d: 0x2c75,
+ 0xf1e: 0x2c55, 0xf1f: 0x2c55, 0xf20: 0x2cb5, 0xf21: 0x2cb5, 0xf22: 0x2cd5, 0xf23: 0x2cd5,
+ 0xf24: 0x0040, 0xf25: 0x2cf5, 0xf26: 0x2d15, 0xf27: 0x2d35, 0xf28: 0x2d35, 0xf29: 0x2d55,
+ 0xf2a: 0x2d75, 0xf2b: 0x2d95, 0xf2c: 0x2db5, 0xf2d: 0x2dd5, 0xf2e: 0x2df5, 0xf2f: 0x2e15,
+ 0xf30: 0x2e35, 0xf31: 0x2e55, 0xf32: 0x2e55, 0xf33: 0x2e75, 0xf34: 0x2e95, 0xf35: 0x2e95,
+ 0xf36: 0x2eb5, 0xf37: 0x2ed5, 0xf38: 0x2e75, 0xf39: 0x2ef5, 0xf3a: 0x2f15, 0xf3b: 0x2ef5,
+ 0xf3c: 0x2e75, 0xf3d: 0x2f35, 0xf3e: 0x2f55, 0xf3f: 0x2f75,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x2f95, 0xf41: 0x2fb5, 0xf42: 0x2d15, 0xf43: 0x2cf5, 0xf44: 0x2fd5, 0xf45: 0x2ff5,
+ 0xf46: 0x3015, 0xf47: 0x3035, 0xf48: 0x3055, 0xf49: 0x3075, 0xf4a: 0x3095, 0xf4b: 0x30b5,
+ 0xf4c: 0x30d5, 0xf4d: 0x30f5, 0xf4e: 0x3115, 0xf4f: 0x0040, 0xf50: 0x0018, 0xf51: 0x0018,
+ 0xf52: 0x3135, 0xf53: 0x3155, 0xf54: 0x3175, 0xf55: 0x3195, 0xf56: 0x31b5, 0xf57: 0x31d5,
+ 0xf58: 0x31f5, 0xf59: 0x3215, 0xf5a: 0x3235, 0xf5b: 0x3255, 0xf5c: 0x3175, 0xf5d: 0x3275,
+ 0xf5e: 0x3295, 0xf5f: 0x32b5, 0xf60: 0x0008, 0xf61: 0x0008, 0xf62: 0x0008, 0xf63: 0x0008,
+ 0xf64: 0x0008, 0xf65: 0x0008, 0xf66: 0x0008, 0xf67: 0x0008, 0xf68: 0x0008, 0xf69: 0x0008,
+ 0xf6a: 0x0008, 0xf6b: 0x0008, 0xf6c: 0x0008, 0xf6d: 0x0008, 0xf6e: 0x0008, 0xf6f: 0x0008,
+ 0xf70: 0x0008, 0xf71: 0x0008, 0xf72: 0x0008, 0xf73: 0x0008, 0xf74: 0x0008, 0xf75: 0x0008,
+ 0xf76: 0x0008, 0xf77: 0x0008, 0xf78: 0x0008, 0xf79: 0x0008, 0xf7a: 0x0008, 0xf7b: 0x0008,
+ 0xf7c: 0x0008, 0xf7d: 0x0008, 0xf7e: 0x0008, 0xf7f: 0x0008,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x0b82, 0xf81: 0x0b8a, 0xf82: 0x0b92, 0xf83: 0x0b9a, 0xf84: 0x32d5, 0xf85: 0x32f5,
+ 0xf86: 0x3315, 0xf87: 0x3335, 0xf88: 0x0018, 0xf89: 0x0018, 0xf8a: 0x0018, 0xf8b: 0x0018,
+ 0xf8c: 0x0018, 0xf8d: 0x0018, 0xf8e: 0x0018, 0xf8f: 0x0018, 0xf90: 0x3355, 0xf91: 0x0ba1,
+ 0xf92: 0x0ba9, 0xf93: 0x0bb1, 0xf94: 0x0bb9, 0xf95: 0x0bc1, 0xf96: 0x0bc9, 0xf97: 0x0bd1,
+ 0xf98: 0x0bd9, 0xf99: 0x0be1, 0xf9a: 0x0be9, 0xf9b: 0x0bf1, 0xf9c: 0x0bf9, 0xf9d: 0x0c01,
+ 0xf9e: 0x0c09, 0xf9f: 0x0c11, 0xfa0: 0x3375, 0xfa1: 0x3395, 0xfa2: 0x33b5, 0xfa3: 0x33d5,
+ 0xfa4: 0x33f5, 0xfa5: 0x33f5, 0xfa6: 0x3415, 0xfa7: 0x3435, 0xfa8: 0x3455, 0xfa9: 0x3475,
+ 0xfaa: 0x3495, 0xfab: 0x34b5, 0xfac: 0x34d5, 0xfad: 0x34f5, 0xfae: 0x3515, 0xfaf: 0x3535,
+ 0xfb0: 0x3555, 0xfb1: 0x3575, 0xfb2: 0x3595, 0xfb3: 0x35b5, 0xfb4: 0x35d5, 0xfb5: 0x35f5,
+ 0xfb6: 0x3615, 0xfb7: 0x3635, 0xfb8: 0x3655, 0xfb9: 0x3675, 0xfba: 0x3695, 0xfbb: 0x36b5,
+ 0xfbc: 0x0c19, 0xfbd: 0x0c21, 0xfbe: 0x36d5, 0xfbf: 0x0018,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x36f5, 0xfc1: 0x3715, 0xfc2: 0x3735, 0xfc3: 0x3755, 0xfc4: 0x3775, 0xfc5: 0x3795,
+ 0xfc6: 0x37b5, 0xfc7: 0x37d5, 0xfc8: 0x37f5, 0xfc9: 0x3815, 0xfca: 0x3835, 0xfcb: 0x3855,
+ 0xfcc: 0x3875, 0xfcd: 0x3895, 0xfce: 0x38b5, 0xfcf: 0x38d5, 0xfd0: 0x38f5, 0xfd1: 0x3915,
+ 0xfd2: 0x3935, 0xfd3: 0x3955, 0xfd4: 0x3975, 0xfd5: 0x3995, 0xfd6: 0x39b5, 0xfd7: 0x39d5,
+ 0xfd8: 0x39f5, 0xfd9: 0x3a15, 0xfda: 0x3a35, 0xfdb: 0x3a55, 0xfdc: 0x3a75, 0xfdd: 0x3a95,
+ 0xfde: 0x3ab5, 0xfdf: 0x3ad5, 0xfe0: 0x3af5, 0xfe1: 0x3b15, 0xfe2: 0x3b35, 0xfe3: 0x3b55,
+ 0xfe4: 0x3b75, 0xfe5: 0x3b95, 0xfe6: 0x1295, 0xfe7: 0x3bb5, 0xfe8: 0x3bd5, 0xfe9: 0x3bf5,
+ 0xfea: 0x3c15, 0xfeb: 0x3c35, 0xfec: 0x3c55, 0xfed: 0x3c75, 0xfee: 0x23b5, 0xfef: 0x3c95,
+ 0xff0: 0x3cb5, 0xff1: 0x0c29, 0xff2: 0x0c31, 0xff3: 0x0c39, 0xff4: 0x0c41, 0xff5: 0x0c49,
+ 0xff6: 0x0c51, 0xff7: 0x0c59, 0xff8: 0x0c61, 0xff9: 0x0c69, 0xffa: 0x0c71, 0xffb: 0x0c79,
+ 0xffc: 0x0c81, 0xffd: 0x0c89, 0xffe: 0x0c91, 0xfff: 0x0c99,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x0ca1, 0x1001: 0x0ca9, 0x1002: 0x0cb1, 0x1003: 0x0cb9, 0x1004: 0x0cc1, 0x1005: 0x0cc9,
+ 0x1006: 0x0cd1, 0x1007: 0x0cd9, 0x1008: 0x0ce1, 0x1009: 0x0ce9, 0x100a: 0x0cf1, 0x100b: 0x0cf9,
+ 0x100c: 0x0d01, 0x100d: 0x3cd5, 0x100e: 0x0d09, 0x100f: 0x3cf5, 0x1010: 0x3d15, 0x1011: 0x3d2d,
+ 0x1012: 0x3d45, 0x1013: 0x3d5d, 0x1014: 0x3d75, 0x1015: 0x3d75, 0x1016: 0x3d5d, 0x1017: 0x3d8d,
+ 0x1018: 0x07d5, 0x1019: 0x3da5, 0x101a: 0x3dbd, 0x101b: 0x3dd5, 0x101c: 0x3ded, 0x101d: 0x3e05,
+ 0x101e: 0x3e1d, 0x101f: 0x3e35, 0x1020: 0x3e4d, 0x1021: 0x3e65, 0x1022: 0x3e7d, 0x1023: 0x3e95,
+ 0x1024: 0x3ead, 0x1025: 0x3ead, 0x1026: 0x3ec5, 0x1027: 0x3ec5, 0x1028: 0x3edd, 0x1029: 0x3edd,
+ 0x102a: 0x3ef5, 0x102b: 0x3f0d, 0x102c: 0x3f25, 0x102d: 0x3f3d, 0x102e: 0x3f55, 0x102f: 0x3f55,
+ 0x1030: 0x3f6d, 0x1031: 0x3f6d, 0x1032: 0x3f6d, 0x1033: 0x3f85, 0x1034: 0x3f9d, 0x1035: 0x3fb5,
+ 0x1036: 0x3fcd, 0x1037: 0x3fb5, 0x1038: 0x3fe5, 0x1039: 0x3ffd, 0x103a: 0x3f85, 0x103b: 0x4015,
+ 0x103c: 0x402d, 0x103d: 0x402d, 0x103e: 0x402d, 0x103f: 0x0d11,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x10f9, 0x1041: 0x1101, 0x1042: 0x40a5, 0x1043: 0x1109, 0x1044: 0x1111, 0x1045: 0x1119,
+ 0x1046: 0x1121, 0x1047: 0x1129, 0x1048: 0x40c5, 0x1049: 0x1131, 0x104a: 0x1139, 0x104b: 0x1141,
+ 0x104c: 0x40e5, 0x104d: 0x40e5, 0x104e: 0x1149, 0x104f: 0x1151, 0x1050: 0x1159, 0x1051: 0x4105,
+ 0x1052: 0x4125, 0x1053: 0x4145, 0x1054: 0x4165, 0x1055: 0x4185, 0x1056: 0x1161, 0x1057: 0x1169,
+ 0x1058: 0x1171, 0x1059: 0x1179, 0x105a: 0x1181, 0x105b: 0x41a5, 0x105c: 0x1189, 0x105d: 0x1191,
+ 0x105e: 0x1199, 0x105f: 0x41c5, 0x1060: 0x41e5, 0x1061: 0x11a1, 0x1062: 0x4205, 0x1063: 0x4225,
+ 0x1064: 0x4245, 0x1065: 0x11a9, 0x1066: 0x4265, 0x1067: 0x11b1, 0x1068: 0x11b9, 0x1069: 0x10f9,
+ 0x106a: 0x4285, 0x106b: 0x42a5, 0x106c: 0x42c5, 0x106d: 0x42e5, 0x106e: 0x11c1, 0x106f: 0x11c9,
+ 0x1070: 0x11d1, 0x1071: 0x11d9, 0x1072: 0x4305, 0x1073: 0x11e1, 0x1074: 0x11e9, 0x1075: 0x11f1,
+ 0x1076: 0x4325, 0x1077: 0x11f9, 0x1078: 0x1201, 0x1079: 0x11f9, 0x107a: 0x1209, 0x107b: 0x1211,
+ 0x107c: 0x4345, 0x107d: 0x1219, 0x107e: 0x1221, 0x107f: 0x1219,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x4365, 0x1081: 0x4385, 0x1082: 0x0040, 0x1083: 0x1229, 0x1084: 0x1231, 0x1085: 0x1239,
+ 0x1086: 0x1241, 0x1087: 0x0040, 0x1088: 0x1249, 0x1089: 0x1251, 0x108a: 0x1259, 0x108b: 0x1261,
+ 0x108c: 0x1269, 0x108d: 0x1271, 0x108e: 0x1199, 0x108f: 0x1279, 0x1090: 0x1281, 0x1091: 0x1289,
+ 0x1092: 0x43a5, 0x1093: 0x1291, 0x1094: 0x1121, 0x1095: 0x43c5, 0x1096: 0x43e5, 0x1097: 0x1299,
+ 0x1098: 0x0040, 0x1099: 0x4405, 0x109a: 0x12a1, 0x109b: 0x12a9, 0x109c: 0x12b1, 0x109d: 0x12b9,
+ 0x109e: 0x12c1, 0x109f: 0x12c9, 0x10a0: 0x12d1, 0x10a1: 0x12d9, 0x10a2: 0x12e1, 0x10a3: 0x12e9,
+ 0x10a4: 0x12f1, 0x10a5: 0x12f9, 0x10a6: 0x1301, 0x10a7: 0x1309, 0x10a8: 0x1311, 0x10a9: 0x1319,
+ 0x10aa: 0x1321, 0x10ab: 0x1329, 0x10ac: 0x1331, 0x10ad: 0x1339, 0x10ae: 0x1341, 0x10af: 0x1349,
+ 0x10b0: 0x1351, 0x10b1: 0x1359, 0x10b2: 0x1361, 0x10b3: 0x1369, 0x10b4: 0x1371, 0x10b5: 0x1379,
+ 0x10b6: 0x1381, 0x10b7: 0x1389, 0x10b8: 0x1391, 0x10b9: 0x1399, 0x10ba: 0x13a1, 0x10bb: 0x13a9,
+ 0x10bc: 0x13b1, 0x10bd: 0x13b9, 0x10be: 0x13c1, 0x10bf: 0x4425,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008,
+ 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008,
+ 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008,
+ 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008,
+ 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008,
+ 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008,
+ 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008,
+ 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308,
+ 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308,
+ 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308,
+ 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x02d1, 0x111d: 0x13c9,
+ 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008,
+ 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008,
+ 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008,
+ 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008,
+ 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008,
+ 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018,
+ 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018,
+ 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018,
+ 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008,
+ 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008,
+ 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008,
+ 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008,
+ 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008,
+ 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008,
+ 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008,
+ 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008,
+ 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008,
+ 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008,
+ 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008,
+ 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008,
+ 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d,
+ 0x11bc: 0x0008, 0x11bd: 0x4445, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d,
+ 0x11cc: 0x0008, 0x11cd: 0x0409, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0x13d1, 0x11eb: 0x0371, 0x11ec: 0x0401, 0x11ed: 0x13d9, 0x11ee: 0x0421, 0x11ef: 0x0008,
+ 0x11f0: 0x13e1, 0x11f1: 0x13e9, 0x11f2: 0x0429, 0x11f3: 0x4465, 0x11f4: 0xe00d, 0x11f5: 0x0008,
+ 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0xe00d, 0x11f9: 0x0008, 0x11fa: 0xe00d, 0x11fb: 0x0008,
+ 0x11fc: 0xe00d, 0x11fd: 0x0008, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0x03f5, 0x1205: 0x0479,
+ 0x1206: 0x447d, 0x1207: 0xe07d, 0x1208: 0x0008, 0x1209: 0xe01d, 0x120a: 0x0008, 0x120b: 0x0040,
+ 0x120c: 0x0040, 0x120d: 0x0040, 0x120e: 0x0040, 0x120f: 0x0040, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0x0040, 0x1213: 0x0008, 0x1214: 0x0040, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040,
+ 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040,
+ 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040,
+ 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040,
+ 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x03d9, 0x1233: 0x03f1, 0x1234: 0x0751, 0x1235: 0xe01d,
+ 0x1236: 0x0008, 0x1237: 0x0008, 0x1238: 0x0741, 0x1239: 0x13f1, 0x123a: 0x0008, 0x123b: 0x0008,
+ 0x123c: 0x0008, 0x123d: 0x0008, 0x123e: 0x0008, 0x123f: 0x0008,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad,
+ 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d,
+ 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008,
+ 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d,
+ 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d,
+ 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d,
+ 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d,
+ 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed,
+ 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d,
+ 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d,
+ 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x1409, 0x1290: 0x1411, 0x1291: 0x1419,
+ 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x1421, 0x1296: 0x1429, 0x1297: 0x1431,
+ 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x1439, 0x12c1: 0x1441, 0x12c2: 0x1449, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x1451,
+ 0x12c6: 0x1451, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x1459, 0x12d4: 0x1461, 0x12d5: 0x1469, 0x12d6: 0x1471, 0x12d7: 0x1479,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x1481,
+ 0x12de: 0x3308, 0x12df: 0x1489, 0x12e0: 0x1491, 0x12e1: 0x0779, 0x12e2: 0x0791, 0x12e3: 0x1499,
+ 0x12e4: 0x14a1, 0x12e5: 0x14a9, 0x12e6: 0x14b1, 0x12e7: 0x14b9, 0x12e8: 0x14c1, 0x12e9: 0x071a,
+ 0x12ea: 0x14c9, 0x12eb: 0x14d1, 0x12ec: 0x14d9, 0x12ed: 0x14e1, 0x12ee: 0x14e9, 0x12ef: 0x14f1,
+ 0x12f0: 0x14f9, 0x12f1: 0x1501, 0x12f2: 0x1509, 0x12f3: 0x1511, 0x12f4: 0x1519, 0x12f5: 0x1521,
+ 0x12f6: 0x1529, 0x12f7: 0x0040, 0x12f8: 0x1531, 0x12f9: 0x1539, 0x12fa: 0x1541, 0x12fb: 0x1549,
+ 0x12fc: 0x1551, 0x12fd: 0x0040, 0x12fe: 0x1559, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x1561, 0x1301: 0x1569, 0x1302: 0x0040, 0x1303: 0x1571, 0x1304: 0x1579, 0x1305: 0x0040,
+ 0x1306: 0x1581, 0x1307: 0x1589, 0x1308: 0x1591, 0x1309: 0x1599, 0x130a: 0x15a1, 0x130b: 0x15a9,
+ 0x130c: 0x15b1, 0x130d: 0x15b9, 0x130e: 0x15c1, 0x130f: 0x15c9, 0x1310: 0x15d1, 0x1311: 0x15d1,
+ 0x1312: 0x15d9, 0x1313: 0x15d9, 0x1314: 0x15d9, 0x1315: 0x15d9, 0x1316: 0x15e1, 0x1317: 0x15e1,
+ 0x1318: 0x15e1, 0x1319: 0x15e1, 0x131a: 0x15e9, 0x131b: 0x15e9, 0x131c: 0x15e9, 0x131d: 0x15e9,
+ 0x131e: 0x15f1, 0x131f: 0x15f1, 0x1320: 0x15f1, 0x1321: 0x15f1, 0x1322: 0x15f9, 0x1323: 0x15f9,
+ 0x1324: 0x15f9, 0x1325: 0x15f9, 0x1326: 0x1601, 0x1327: 0x1601, 0x1328: 0x1601, 0x1329: 0x1601,
+ 0x132a: 0x1609, 0x132b: 0x1609, 0x132c: 0x1609, 0x132d: 0x1609, 0x132e: 0x1611, 0x132f: 0x1611,
+ 0x1330: 0x1611, 0x1331: 0x1611, 0x1332: 0x1619, 0x1333: 0x1619, 0x1334: 0x1619, 0x1335: 0x1619,
+ 0x1336: 0x1621, 0x1337: 0x1621, 0x1338: 0x1621, 0x1339: 0x1621, 0x133a: 0x1629, 0x133b: 0x1629,
+ 0x133c: 0x1629, 0x133d: 0x1629, 0x133e: 0x1631, 0x133f: 0x1631,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x1631, 0x1341: 0x1631, 0x1342: 0x1639, 0x1343: 0x1639, 0x1344: 0x1641, 0x1345: 0x1641,
+ 0x1346: 0x1649, 0x1347: 0x1649, 0x1348: 0x1651, 0x1349: 0x1651, 0x134a: 0x1659, 0x134b: 0x1659,
+ 0x134c: 0x1661, 0x134d: 0x1661, 0x134e: 0x1669, 0x134f: 0x1669, 0x1350: 0x1669, 0x1351: 0x1669,
+ 0x1352: 0x1671, 0x1353: 0x1671, 0x1354: 0x1671, 0x1355: 0x1671, 0x1356: 0x1679, 0x1357: 0x1679,
+ 0x1358: 0x1679, 0x1359: 0x1679, 0x135a: 0x1681, 0x135b: 0x1681, 0x135c: 0x1681, 0x135d: 0x1681,
+ 0x135e: 0x1689, 0x135f: 0x1689, 0x1360: 0x1691, 0x1361: 0x1691, 0x1362: 0x1691, 0x1363: 0x1691,
+ 0x1364: 0x1699, 0x1365: 0x1699, 0x1366: 0x16a1, 0x1367: 0x16a1, 0x1368: 0x16a1, 0x1369: 0x16a1,
+ 0x136a: 0x16a9, 0x136b: 0x16a9, 0x136c: 0x16a9, 0x136d: 0x16a9, 0x136e: 0x16b1, 0x136f: 0x16b1,
+ 0x1370: 0x16b9, 0x1371: 0x16b9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0818, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x16c1, 0x1394: 0x16c1, 0x1395: 0x16c1, 0x1396: 0x16c1, 0x1397: 0x16c9,
+ 0x1398: 0x16c9, 0x1399: 0x16d1, 0x139a: 0x16d1, 0x139b: 0x16d9, 0x139c: 0x16d9, 0x139d: 0x0149,
+ 0x139e: 0x16e1, 0x139f: 0x16e1, 0x13a0: 0x16e9, 0x13a1: 0x16e9, 0x13a2: 0x16f1, 0x13a3: 0x16f1,
+ 0x13a4: 0x16f9, 0x13a5: 0x16f9, 0x13a6: 0x16f9, 0x13a7: 0x16f9, 0x13a8: 0x1701, 0x13a9: 0x1701,
+ 0x13aa: 0x1709, 0x13ab: 0x1709, 0x13ac: 0x1711, 0x13ad: 0x1711, 0x13ae: 0x1719, 0x13af: 0x1719,
+ 0x13b0: 0x1721, 0x13b1: 0x1721, 0x13b2: 0x1729, 0x13b3: 0x1729, 0x13b4: 0x1731, 0x13b5: 0x1731,
+ 0x13b6: 0x1739, 0x13b7: 0x1739, 0x13b8: 0x1739, 0x13b9: 0x1741, 0x13ba: 0x1741, 0x13bb: 0x1741,
+ 0x13bc: 0x1749, 0x13bd: 0x1749, 0x13be: 0x1749, 0x13bf: 0x1749,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x1949, 0x13c1: 0x1951, 0x13c2: 0x1959, 0x13c3: 0x1961, 0x13c4: 0x1969, 0x13c5: 0x1971,
+ 0x13c6: 0x1979, 0x13c7: 0x1981, 0x13c8: 0x1989, 0x13c9: 0x1991, 0x13ca: 0x1999, 0x13cb: 0x19a1,
+ 0x13cc: 0x19a9, 0x13cd: 0x19b1, 0x13ce: 0x19b9, 0x13cf: 0x19c1, 0x13d0: 0x19c9, 0x13d1: 0x19d1,
+ 0x13d2: 0x19d9, 0x13d3: 0x19e1, 0x13d4: 0x19e9, 0x13d5: 0x19f1, 0x13d6: 0x19f9, 0x13d7: 0x1a01,
+ 0x13d8: 0x1a09, 0x13d9: 0x1a11, 0x13da: 0x1a19, 0x13db: 0x1a21, 0x13dc: 0x1a29, 0x13dd: 0x1a31,
+ 0x13de: 0x1a3a, 0x13df: 0x1a42, 0x13e0: 0x1a4a, 0x13e1: 0x1a52, 0x13e2: 0x1a5a, 0x13e3: 0x1a62,
+ 0x13e4: 0x1a69, 0x13e5: 0x1a71, 0x13e6: 0x1761, 0x13e7: 0x1a79, 0x13e8: 0x1741, 0x13e9: 0x1769,
+ 0x13ea: 0x1a81, 0x13eb: 0x1a89, 0x13ec: 0x1789, 0x13ed: 0x1a91, 0x13ee: 0x1791, 0x13ef: 0x1799,
+ 0x13f0: 0x1a99, 0x13f1: 0x1aa1, 0x13f2: 0x17b9, 0x13f3: 0x1aa9, 0x13f4: 0x17c1, 0x13f5: 0x17c9,
+ 0x13f6: 0x1ab1, 0x13f7: 0x1ab9, 0x13f8: 0x17d9, 0x13f9: 0x1ac1, 0x13fa: 0x17e1, 0x13fb: 0x17e9,
+ 0x13fc: 0x18d1, 0x13fd: 0x18d9, 0x13fe: 0x18f1, 0x13ff: 0x18f9,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x1901, 0x1401: 0x1921, 0x1402: 0x1929, 0x1403: 0x1931, 0x1404: 0x1939, 0x1405: 0x1959,
+ 0x1406: 0x1961, 0x1407: 0x1969, 0x1408: 0x1ac9, 0x1409: 0x1989, 0x140a: 0x1ad1, 0x140b: 0x1ad9,
+ 0x140c: 0x19b9, 0x140d: 0x1ae1, 0x140e: 0x19c1, 0x140f: 0x19c9, 0x1410: 0x1a31, 0x1411: 0x1ae9,
+ 0x1412: 0x1af1, 0x1413: 0x1a09, 0x1414: 0x1af9, 0x1415: 0x1a11, 0x1416: 0x1a19, 0x1417: 0x1751,
+ 0x1418: 0x1759, 0x1419: 0x1b01, 0x141a: 0x1761, 0x141b: 0x1b09, 0x141c: 0x1771, 0x141d: 0x1779,
+ 0x141e: 0x1781, 0x141f: 0x1789, 0x1420: 0x1b11, 0x1421: 0x17a1, 0x1422: 0x17a9, 0x1423: 0x17b1,
+ 0x1424: 0x17b9, 0x1425: 0x1b19, 0x1426: 0x17d9, 0x1427: 0x17f1, 0x1428: 0x17f9, 0x1429: 0x1801,
+ 0x142a: 0x1809, 0x142b: 0x1811, 0x142c: 0x1821, 0x142d: 0x1829, 0x142e: 0x1831, 0x142f: 0x1839,
+ 0x1430: 0x1841, 0x1431: 0x1849, 0x1432: 0x1b21, 0x1433: 0x1851, 0x1434: 0x1859, 0x1435: 0x1861,
+ 0x1436: 0x1869, 0x1437: 0x1871, 0x1438: 0x1879, 0x1439: 0x1889, 0x143a: 0x1891, 0x143b: 0x1899,
+ 0x143c: 0x18a1, 0x143d: 0x18a9, 0x143e: 0x18b1, 0x143f: 0x18b9,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x18c1, 0x1441: 0x18c9, 0x1442: 0x18e1, 0x1443: 0x18e9, 0x1444: 0x1909, 0x1445: 0x1911,
+ 0x1446: 0x1919, 0x1447: 0x1921, 0x1448: 0x1929, 0x1449: 0x1941, 0x144a: 0x1949, 0x144b: 0x1951,
+ 0x144c: 0x1959, 0x144d: 0x1b29, 0x144e: 0x1971, 0x144f: 0x1979, 0x1450: 0x1981, 0x1451: 0x1989,
+ 0x1452: 0x19a1, 0x1453: 0x19a9, 0x1454: 0x19b1, 0x1455: 0x19b9, 0x1456: 0x1b31, 0x1457: 0x19d1,
+ 0x1458: 0x19d9, 0x1459: 0x1b39, 0x145a: 0x19f1, 0x145b: 0x19f9, 0x145c: 0x1a01, 0x145d: 0x1a09,
+ 0x145e: 0x1b41, 0x145f: 0x1761, 0x1460: 0x1b09, 0x1461: 0x1789, 0x1462: 0x1b11, 0x1463: 0x17b9,
+ 0x1464: 0x1b19, 0x1465: 0x17d9, 0x1466: 0x1b49, 0x1467: 0x1841, 0x1468: 0x1b51, 0x1469: 0x1b59,
+ 0x146a: 0x1b61, 0x146b: 0x1921, 0x146c: 0x1929, 0x146d: 0x1959, 0x146e: 0x19b9, 0x146f: 0x1b31,
+ 0x1470: 0x1a09, 0x1471: 0x1b41, 0x1472: 0x1b69, 0x1473: 0x1b71, 0x1474: 0x1b79, 0x1475: 0x1b81,
+ 0x1476: 0x1b89, 0x1477: 0x1b91, 0x1478: 0x1b99, 0x1479: 0x1ba1, 0x147a: 0x1ba9, 0x147b: 0x1bb1,
+ 0x147c: 0x1bb9, 0x147d: 0x1bc1, 0x147e: 0x1bc9, 0x147f: 0x1bd1,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x1bd9, 0x1481: 0x1be1, 0x1482: 0x1be9, 0x1483: 0x1bf1, 0x1484: 0x1bf9, 0x1485: 0x1c01,
+ 0x1486: 0x1c09, 0x1487: 0x1c11, 0x1488: 0x1c19, 0x1489: 0x1c21, 0x148a: 0x1c29, 0x148b: 0x1c31,
+ 0x148c: 0x1b59, 0x148d: 0x1c39, 0x148e: 0x1c41, 0x148f: 0x1c49, 0x1490: 0x1c51, 0x1491: 0x1b81,
+ 0x1492: 0x1b89, 0x1493: 0x1b91, 0x1494: 0x1b99, 0x1495: 0x1ba1, 0x1496: 0x1ba9, 0x1497: 0x1bb1,
+ 0x1498: 0x1bb9, 0x1499: 0x1bc1, 0x149a: 0x1bc9, 0x149b: 0x1bd1, 0x149c: 0x1bd9, 0x149d: 0x1be1,
+ 0x149e: 0x1be9, 0x149f: 0x1bf1, 0x14a0: 0x1bf9, 0x14a1: 0x1c01, 0x14a2: 0x1c09, 0x14a3: 0x1c11,
+ 0x14a4: 0x1c19, 0x14a5: 0x1c21, 0x14a6: 0x1c29, 0x14a7: 0x1c31, 0x14a8: 0x1b59, 0x14a9: 0x1c39,
+ 0x14aa: 0x1c41, 0x14ab: 0x1c49, 0x14ac: 0x1c51, 0x14ad: 0x1c21, 0x14ae: 0x1c29, 0x14af: 0x1c31,
+ 0x14b0: 0x1b59, 0x14b1: 0x1b51, 0x14b2: 0x1b61, 0x14b3: 0x1881, 0x14b4: 0x1829, 0x14b5: 0x1831,
+ 0x14b6: 0x1839, 0x14b7: 0x1c21, 0x14b8: 0x1c29, 0x14b9: 0x1c31, 0x14ba: 0x1881, 0x14bb: 0x1889,
+ 0x14bc: 0x1c59, 0x14bd: 0x1c59, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0018, 0x14c1: 0x0018, 0x14c2: 0x0018, 0x14c3: 0x0018, 0x14c4: 0x0018, 0x14c5: 0x0018,
+ 0x14c6: 0x0018, 0x14c7: 0x0018, 0x14c8: 0x0018, 0x14c9: 0x0018, 0x14ca: 0x0018, 0x14cb: 0x0018,
+ 0x14cc: 0x0018, 0x14cd: 0x0018, 0x14ce: 0x0018, 0x14cf: 0x0018, 0x14d0: 0x1c61, 0x14d1: 0x1c69,
+ 0x14d2: 0x1c69, 0x14d3: 0x1c71, 0x14d4: 0x1c79, 0x14d5: 0x1c81, 0x14d6: 0x1c89, 0x14d7: 0x1c91,
+ 0x14d8: 0x1c99, 0x14d9: 0x1c99, 0x14da: 0x1ca1, 0x14db: 0x1ca9, 0x14dc: 0x1cb1, 0x14dd: 0x1cb9,
+ 0x14de: 0x1cc1, 0x14df: 0x1cc9, 0x14e0: 0x1cc9, 0x14e1: 0x1cd1, 0x14e2: 0x1cd9, 0x14e3: 0x1cd9,
+ 0x14e4: 0x1ce1, 0x14e5: 0x1ce1, 0x14e6: 0x1ce9, 0x14e7: 0x1cf1, 0x14e8: 0x1cf1, 0x14e9: 0x1cf9,
+ 0x14ea: 0x1d01, 0x14eb: 0x1d01, 0x14ec: 0x1d09, 0x14ed: 0x1d09, 0x14ee: 0x1d11, 0x14ef: 0x1d19,
+ 0x14f0: 0x1d19, 0x14f1: 0x1d21, 0x14f2: 0x1d21, 0x14f3: 0x1d29, 0x14f4: 0x1d31, 0x14f5: 0x1d39,
+ 0x14f6: 0x1d41, 0x14f7: 0x1d41, 0x14f8: 0x1d49, 0x14f9: 0x1d51, 0x14fa: 0x1d59, 0x14fb: 0x1d61,
+ 0x14fc: 0x1d69, 0x14fd: 0x1d69, 0x14fe: 0x1d71, 0x14ff: 0x1d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x1f29, 0x1501: 0x1f31, 0x1502: 0x1f39, 0x1503: 0x1f11, 0x1504: 0x1d39, 0x1505: 0x1ce9,
+ 0x1506: 0x1f41, 0x1507: 0x1f49, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0018, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0x1f51, 0x1531: 0x1f59, 0x1532: 0x1f61, 0x1533: 0x1f69, 0x1534: 0x1f71, 0x1535: 0x1f79,
+ 0x1536: 0x1f81, 0x1537: 0x1f89, 0x1538: 0x1f91, 0x1539: 0x1f99, 0x153a: 0x1fa2, 0x153b: 0x1faa,
+ 0x153c: 0x1fb1, 0x153d: 0x0018, 0x153e: 0x0018, 0x153f: 0x0018,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0x1fba, 0x1551: 0x7d8d,
+ 0x1552: 0x0040, 0x1553: 0x1fc2, 0x1554: 0x0122, 0x1555: 0x1fca, 0x1556: 0x1fd2, 0x1557: 0x7dad,
+ 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0x1fda, 0x1574: 0x1fda, 0x1575: 0x072a,
+ 0x1576: 0x0732, 0x1577: 0x1fe2, 0x1578: 0x1fea, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d,
+ 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0x1ff2, 0x1588: 0x1ffa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e,
+ 0x158c: 0x7fae, 0x158d: 0x1fda, 0x158e: 0x1fda, 0x158f: 0x1fda, 0x1590: 0x1fba, 0x1591: 0x7fcd,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x0122, 0x1595: 0x1fc2, 0x1596: 0x1fd2, 0x1597: 0x1fca,
+ 0x1598: 0x7fed, 0x1599: 0x072a, 0x159a: 0x0732, 0x159b: 0x1fe2, 0x159c: 0x1fea, 0x159d: 0x7ecd,
+ 0x159e: 0x7f2d, 0x159f: 0x2002, 0x15a0: 0x200a, 0x15a1: 0x2012, 0x15a2: 0x071a, 0x15a3: 0x2019,
+ 0x15a4: 0x2022, 0x15a5: 0x202a, 0x15a6: 0x0722, 0x15a7: 0x0040, 0x15a8: 0x2032, 0x15a9: 0x203a,
+ 0x15aa: 0x2042, 0x15ab: 0x204a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x800e, 0x15b1: 0x2051, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040,
+ 0x15b6: 0x806e, 0x15b7: 0x2059, 0x15b8: 0x808e, 0x15b9: 0x2061, 0x15ba: 0x80ae, 0x15bb: 0x2069,
+ 0x15bc: 0x80ce, 0x15bd: 0x2071, 0x15be: 0x80ee, 0x15bf: 0x2079,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x2081, 0x15c1: 0x2089, 0x15c2: 0x2089, 0x15c3: 0x2091, 0x15c4: 0x2091, 0x15c5: 0x2099,
+ 0x15c6: 0x2099, 0x15c7: 0x20a1, 0x15c8: 0x20a1, 0x15c9: 0x20a9, 0x15ca: 0x20a9, 0x15cb: 0x20a9,
+ 0x15cc: 0x20a9, 0x15cd: 0x20b1, 0x15ce: 0x20b1, 0x15cf: 0x20b9, 0x15d0: 0x20b9, 0x15d1: 0x20b9,
+ 0x15d2: 0x20b9, 0x15d3: 0x20c1, 0x15d4: 0x20c1, 0x15d5: 0x20c9, 0x15d6: 0x20c9, 0x15d7: 0x20c9,
+ 0x15d8: 0x20c9, 0x15d9: 0x20d1, 0x15da: 0x20d1, 0x15db: 0x20d1, 0x15dc: 0x20d1, 0x15dd: 0x20d9,
+ 0x15de: 0x20d9, 0x15df: 0x20d9, 0x15e0: 0x20d9, 0x15e1: 0x20e1, 0x15e2: 0x20e1, 0x15e3: 0x20e1,
+ 0x15e4: 0x20e1, 0x15e5: 0x20e9, 0x15e6: 0x20e9, 0x15e7: 0x20e9, 0x15e8: 0x20e9, 0x15e9: 0x20f1,
+ 0x15ea: 0x20f1, 0x15eb: 0x20f9, 0x15ec: 0x20f9, 0x15ed: 0x2101, 0x15ee: 0x2101, 0x15ef: 0x2109,
+ 0x15f0: 0x2109, 0x15f1: 0x2111, 0x15f2: 0x2111, 0x15f3: 0x2111, 0x15f4: 0x2111, 0x15f5: 0x2119,
+ 0x15f6: 0x2119, 0x15f7: 0x2119, 0x15f8: 0x2119, 0x15f9: 0x2121, 0x15fa: 0x2121, 0x15fb: 0x2121,
+ 0x15fc: 0x2121, 0x15fd: 0x2129, 0x15fe: 0x2129, 0x15ff: 0x2129,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x2129, 0x1601: 0x2131, 0x1602: 0x2131, 0x1603: 0x2131, 0x1604: 0x2131, 0x1605: 0x2139,
+ 0x1606: 0x2139, 0x1607: 0x2139, 0x1608: 0x2139, 0x1609: 0x2141, 0x160a: 0x2141, 0x160b: 0x2141,
+ 0x160c: 0x2141, 0x160d: 0x2149, 0x160e: 0x2149, 0x160f: 0x2149, 0x1610: 0x2149, 0x1611: 0x2151,
+ 0x1612: 0x2151, 0x1613: 0x2151, 0x1614: 0x2151, 0x1615: 0x2159, 0x1616: 0x2159, 0x1617: 0x2159,
+ 0x1618: 0x2159, 0x1619: 0x2161, 0x161a: 0x2161, 0x161b: 0x2161, 0x161c: 0x2161, 0x161d: 0x2169,
+ 0x161e: 0x2169, 0x161f: 0x2169, 0x1620: 0x2169, 0x1621: 0x2171, 0x1622: 0x2171, 0x1623: 0x2171,
+ 0x1624: 0x2171, 0x1625: 0x2179, 0x1626: 0x2179, 0x1627: 0x2179, 0x1628: 0x2179, 0x1629: 0x2181,
+ 0x162a: 0x2181, 0x162b: 0x2181, 0x162c: 0x2181, 0x162d: 0x2189, 0x162e: 0x2189, 0x162f: 0x1701,
+ 0x1630: 0x1701, 0x1631: 0x2191, 0x1632: 0x2191, 0x1633: 0x2191, 0x1634: 0x2191, 0x1635: 0x2199,
+ 0x1636: 0x2199, 0x1637: 0x21a1, 0x1638: 0x21a1, 0x1639: 0x21a9, 0x163a: 0x21a9, 0x163b: 0x21b1,
+ 0x163c: 0x21b1, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0x1fca, 0x1642: 0x21ba, 0x1643: 0x2002, 0x1644: 0x203a, 0x1645: 0x2042,
+ 0x1646: 0x200a, 0x1647: 0x21c2, 0x1648: 0x072a, 0x1649: 0x0732, 0x164a: 0x2012, 0x164b: 0x071a,
+ 0x164c: 0x1fba, 0x164d: 0x2019, 0x164e: 0x0961, 0x164f: 0x21ca, 0x1650: 0x06e1, 0x1651: 0x0049,
+ 0x1652: 0x0029, 0x1653: 0x0031, 0x1654: 0x06e9, 0x1655: 0x06f1, 0x1656: 0x06f9, 0x1657: 0x0701,
+ 0x1658: 0x0709, 0x1659: 0x0711, 0x165a: 0x1fc2, 0x165b: 0x0122, 0x165c: 0x2022, 0x165d: 0x0722,
+ 0x165e: 0x202a, 0x165f: 0x1fd2, 0x1660: 0x204a, 0x1661: 0x0019, 0x1662: 0x02e9, 0x1663: 0x03d9,
+ 0x1664: 0x02f1, 0x1665: 0x02f9, 0x1666: 0x03f1, 0x1667: 0x0309, 0x1668: 0x00a9, 0x1669: 0x0311,
+ 0x166a: 0x00b1, 0x166b: 0x0319, 0x166c: 0x0101, 0x166d: 0x0321, 0x166e: 0x0329, 0x166f: 0x0051,
+ 0x1670: 0x0339, 0x1671: 0x0751, 0x1672: 0x00b9, 0x1673: 0x0089, 0x1674: 0x0341, 0x1675: 0x0349,
+ 0x1676: 0x0391, 0x1677: 0x00c1, 0x1678: 0x0109, 0x1679: 0x00c9, 0x167a: 0x04b1, 0x167b: 0x1ff2,
+ 0x167c: 0x2032, 0x167d: 0x1ffa, 0x167e: 0x21d2, 0x167f: 0x1fda,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x0672, 0x1681: 0x0019, 0x1682: 0x02e9, 0x1683: 0x03d9, 0x1684: 0x02f1, 0x1685: 0x02f9,
+ 0x1686: 0x03f1, 0x1687: 0x0309, 0x1688: 0x00a9, 0x1689: 0x0311, 0x168a: 0x00b1, 0x168b: 0x0319,
+ 0x168c: 0x0101, 0x168d: 0x0321, 0x168e: 0x0329, 0x168f: 0x0051, 0x1690: 0x0339, 0x1691: 0x0751,
+ 0x1692: 0x00b9, 0x1693: 0x0089, 0x1694: 0x0341, 0x1695: 0x0349, 0x1696: 0x0391, 0x1697: 0x00c1,
+ 0x1698: 0x0109, 0x1699: 0x00c9, 0x169a: 0x04b1, 0x169b: 0x1fe2, 0x169c: 0x21da, 0x169d: 0x1fea,
+ 0x169e: 0x21e2, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x0961, 0x16a2: 0x814d, 0x16a3: 0x814d,
+ 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d,
+ 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd,
+ 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d,
+ 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d,
+ 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d,
+ 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd,
+ 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d,
+ 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d,
+ 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d,
+ 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d,
+ 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed,
+ 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d,
+ 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed,
+ 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d,
+ 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d,
+ 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d,
+ 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x21e9, 0x1721: 0x21f1, 0x1722: 0x21f9, 0x1723: 0x8a0e,
+ 0x1724: 0x2201, 0x1725: 0x2209, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d,
+ 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0008, 0x1741: 0x0008, 0x1742: 0x0008, 0x1743: 0x0008, 0x1744: 0x0008, 0x1745: 0x0008,
+ 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008,
+ 0x174c: 0x0008, 0x174d: 0x0008, 0x174e: 0x0008, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0008,
+ 0x1752: 0x0008, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008,
+ 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008,
+ 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008,
+ 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0040, 0x176c: 0x0040, 0x176d: 0x0040, 0x176e: 0x0040, 0x176f: 0x0018,
+ 0x1770: 0x8b3d, 0x1771: 0x8b55, 0x1772: 0x8b6d, 0x1773: 0x8b55, 0x1774: 0x8b85, 0x1775: 0x8b55,
+ 0x1776: 0x8b6d, 0x1777: 0x8b55, 0x1778: 0x8b3d, 0x1779: 0x8b9d, 0x177a: 0x8bb5, 0x177b: 0x0040,
+ 0x177c: 0x8bcd, 0x177d: 0x8b9d, 0x177e: 0x8bb5, 0x177f: 0x8b9d,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0xe13d, 0x1781: 0xe14d, 0x1782: 0xe15d, 0x1783: 0xe14d, 0x1784: 0xe17d, 0x1785: 0xe14d,
+ 0x1786: 0xe15d, 0x1787: 0xe14d, 0x1788: 0xe13d, 0x1789: 0xe1cd, 0x178a: 0xe1dd, 0x178b: 0x0040,
+ 0x178c: 0xe1fd, 0x178d: 0xe1cd, 0x178e: 0xe1dd, 0x178f: 0xe1cd, 0x1790: 0xe13d, 0x1791: 0xe14d,
+ 0x1792: 0xe15d, 0x1793: 0x0040, 0x1794: 0xe17d, 0x1795: 0xe14d, 0x1796: 0x0040, 0x1797: 0x0008,
+ 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008,
+ 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0040, 0x17a3: 0x0008,
+ 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0008,
+ 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008,
+ 0x17b0: 0x0008, 0x17b1: 0x0008, 0x17b2: 0x0040, 0x17b3: 0x0008, 0x17b4: 0x0008, 0x17b5: 0x0008,
+ 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0008,
+ 0x17bc: 0x0008, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x0008, 0x17c1: 0x2211, 0x17c2: 0x2219, 0x17c3: 0x02e1, 0x17c4: 0x2221, 0x17c5: 0x2229,
+ 0x17c6: 0x0040, 0x17c7: 0x2231, 0x17c8: 0x2239, 0x17c9: 0x2241, 0x17ca: 0x2249, 0x17cb: 0x2251,
+ 0x17cc: 0x2259, 0x17cd: 0x2261, 0x17ce: 0x2269, 0x17cf: 0x2271, 0x17d0: 0x2279, 0x17d1: 0x2281,
+ 0x17d2: 0x2289, 0x17d3: 0x2291, 0x17d4: 0x2299, 0x17d5: 0x0741, 0x17d6: 0x22a1, 0x17d7: 0x22a9,
+ 0x17d8: 0x22b1, 0x17d9: 0x22b9, 0x17da: 0x22c1, 0x17db: 0x13d9, 0x17dc: 0x8be5, 0x17dd: 0x22c9,
+ 0x17de: 0x22d1, 0x17df: 0x8c05, 0x17e0: 0x22d9, 0x17e1: 0x8c25, 0x17e2: 0x22e1, 0x17e3: 0x22e9,
+ 0x17e4: 0x22f1, 0x17e5: 0x0751, 0x17e6: 0x22f9, 0x17e7: 0x8c45, 0x17e8: 0x0949, 0x17e9: 0x2301,
+ 0x17ea: 0x2309, 0x17eb: 0x2311, 0x17ec: 0x2319, 0x17ed: 0x2321, 0x17ee: 0x2329, 0x17ef: 0x2331,
+ 0x17f0: 0x2339, 0x17f1: 0x0040, 0x17f2: 0x2341, 0x17f3: 0x2349, 0x17f4: 0x2351, 0x17f5: 0x2359,
+ 0x17f6: 0x2361, 0x17f7: 0x2369, 0x17f8: 0x2371, 0x17f9: 0x8c65, 0x17fa: 0x8c85, 0x17fb: 0x0040,
+ 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x0a08, 0x1801: 0x0a08, 0x1802: 0x0a08, 0x1803: 0x0a08, 0x1804: 0x0a08, 0x1805: 0x0c08,
+ 0x1806: 0x0808, 0x1807: 0x0c08, 0x1808: 0x0818, 0x1809: 0x0c08, 0x180a: 0x0c08, 0x180b: 0x0808,
+ 0x180c: 0x0808, 0x180d: 0x0908, 0x180e: 0x0c08, 0x180f: 0x0c08, 0x1810: 0x0c08, 0x1811: 0x0c08,
+ 0x1812: 0x0c08, 0x1813: 0x0a08, 0x1814: 0x0a08, 0x1815: 0x0a08, 0x1816: 0x0a08, 0x1817: 0x0908,
+ 0x1818: 0x0a08, 0x1819: 0x0a08, 0x181a: 0x0a08, 0x181b: 0x0a08, 0x181c: 0x0a08, 0x181d: 0x0c08,
+ 0x181e: 0x0a08, 0x181f: 0x0a08, 0x1820: 0x0a08, 0x1821: 0x0c08, 0x1822: 0x0808, 0x1823: 0x0808,
+ 0x1824: 0x0c08, 0x1825: 0x3308, 0x1826: 0x3308, 0x1827: 0x0040, 0x1828: 0x0040, 0x1829: 0x0040,
+ 0x182a: 0x0040, 0x182b: 0x0a18, 0x182c: 0x0a18, 0x182d: 0x0a18, 0x182e: 0x0a18, 0x182f: 0x0c18,
+ 0x1830: 0x0818, 0x1831: 0x0818, 0x1832: 0x0818, 0x1833: 0x0818, 0x1834: 0x0818, 0x1835: 0x0818,
+ 0x1836: 0x0818, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0a08, 0x1841: 0x0c08, 0x1842: 0x0a08, 0x1843: 0x0c08, 0x1844: 0x0c08, 0x1845: 0x0c08,
+ 0x1846: 0x0a08, 0x1847: 0x0a08, 0x1848: 0x0a08, 0x1849: 0x0c08, 0x184a: 0x0a08, 0x184b: 0x0a08,
+ 0x184c: 0x0c08, 0x184d: 0x0a08, 0x184e: 0x0c08, 0x184f: 0x0c08, 0x1850: 0x0a08, 0x1851: 0x0c08,
+ 0x1852: 0x0040, 0x1853: 0x0040, 0x1854: 0x0040, 0x1855: 0x0040, 0x1856: 0x0040, 0x1857: 0x0040,
+ 0x1858: 0x0040, 0x1859: 0x0818, 0x185a: 0x0818, 0x185b: 0x0818, 0x185c: 0x0818, 0x185d: 0x0040,
+ 0x185e: 0x0040, 0x185f: 0x0040, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0040, 0x1863: 0x0040,
+ 0x1864: 0x0040, 0x1865: 0x0040, 0x1866: 0x0040, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0c18,
+ 0x186a: 0x0c18, 0x186b: 0x0c18, 0x186c: 0x0c18, 0x186d: 0x0a18, 0x186e: 0x0a18, 0x186f: 0x0818,
+ 0x1870: 0x0040, 0x1871: 0x0040, 0x1872: 0x0040, 0x1873: 0x0040, 0x1874: 0x0040, 0x1875: 0x0040,
+ 0x1876: 0x0040, 0x1877: 0x0040, 0x1878: 0x0040, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x0040,
+ 0x187c: 0x0040, 0x187d: 0x0040, 0x187e: 0x0040, 0x187f: 0x0040,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x3308, 0x1881: 0x3308, 0x1882: 0x3008, 0x1883: 0x3008, 0x1884: 0x0040, 0x1885: 0x0008,
+ 0x1886: 0x0008, 0x1887: 0x0008, 0x1888: 0x0008, 0x1889: 0x0008, 0x188a: 0x0008, 0x188b: 0x0008,
+ 0x188c: 0x0008, 0x188d: 0x0040, 0x188e: 0x0040, 0x188f: 0x0008, 0x1890: 0x0008, 0x1891: 0x0040,
+ 0x1892: 0x0040, 0x1893: 0x0008, 0x1894: 0x0008, 0x1895: 0x0008, 0x1896: 0x0008, 0x1897: 0x0008,
+ 0x1898: 0x0008, 0x1899: 0x0008, 0x189a: 0x0008, 0x189b: 0x0008, 0x189c: 0x0008, 0x189d: 0x0008,
+ 0x189e: 0x0008, 0x189f: 0x0008, 0x18a0: 0x0008, 0x18a1: 0x0008, 0x18a2: 0x0008, 0x18a3: 0x0008,
+ 0x18a4: 0x0008, 0x18a5: 0x0008, 0x18a6: 0x0008, 0x18a7: 0x0008, 0x18a8: 0x0008, 0x18a9: 0x0040,
+ 0x18aa: 0x0008, 0x18ab: 0x0008, 0x18ac: 0x0008, 0x18ad: 0x0008, 0x18ae: 0x0008, 0x18af: 0x0008,
+ 0x18b0: 0x0008, 0x18b1: 0x0040, 0x18b2: 0x0008, 0x18b3: 0x0008, 0x18b4: 0x0040, 0x18b5: 0x0008,
+ 0x18b6: 0x0008, 0x18b7: 0x0008, 0x18b8: 0x0008, 0x18b9: 0x0008, 0x18ba: 0x0040, 0x18bb: 0x3308,
+ 0x18bc: 0x3308, 0x18bd: 0x0008, 0x18be: 0x3008, 0x18bf: 0x3008,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x3308, 0x18c1: 0x3008, 0x18c2: 0x3008, 0x18c3: 0x3008, 0x18c4: 0x3008, 0x18c5: 0x0040,
+ 0x18c6: 0x0040, 0x18c7: 0x3008, 0x18c8: 0x3008, 0x18c9: 0x0040, 0x18ca: 0x0040, 0x18cb: 0x3008,
+ 0x18cc: 0x3008, 0x18cd: 0x3808, 0x18ce: 0x0040, 0x18cf: 0x0040, 0x18d0: 0x0008, 0x18d1: 0x0040,
+ 0x18d2: 0x0040, 0x18d3: 0x0040, 0x18d4: 0x0040, 0x18d5: 0x0040, 0x18d6: 0x0040, 0x18d7: 0x3008,
+ 0x18d8: 0x0040, 0x18d9: 0x0040, 0x18da: 0x0040, 0x18db: 0x0040, 0x18dc: 0x0040, 0x18dd: 0x0008,
+ 0x18de: 0x0008, 0x18df: 0x0008, 0x18e0: 0x0008, 0x18e1: 0x0008, 0x18e2: 0x3008, 0x18e3: 0x3008,
+ 0x18e4: 0x0040, 0x18e5: 0x0040, 0x18e6: 0x3308, 0x18e7: 0x3308, 0x18e8: 0x3308, 0x18e9: 0x3308,
+ 0x18ea: 0x3308, 0x18eb: 0x3308, 0x18ec: 0x3308, 0x18ed: 0x0040, 0x18ee: 0x0040, 0x18ef: 0x0040,
+ 0x18f0: 0x3308, 0x18f1: 0x3308, 0x18f2: 0x3308, 0x18f3: 0x3308, 0x18f4: 0x3308, 0x18f5: 0x0040,
+ 0x18f6: 0x0040, 0x18f7: 0x0040, 0x18f8: 0x0040, 0x18f9: 0x0040, 0x18fa: 0x0040, 0x18fb: 0x0040,
+ 0x18fc: 0x0040, 0x18fd: 0x0040, 0x18fe: 0x0040, 0x18ff: 0x0040,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0008, 0x1901: 0x0008, 0x1902: 0x0008, 0x1903: 0x0008, 0x1904: 0x0008, 0x1905: 0x0008,
+ 0x1906: 0x0008, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0008, 0x190a: 0x0040, 0x190b: 0x0040,
+ 0x190c: 0x0008, 0x190d: 0x0008, 0x190e: 0x0008, 0x190f: 0x0008, 0x1910: 0x0008, 0x1911: 0x0008,
+ 0x1912: 0x0008, 0x1913: 0x0008, 0x1914: 0x0040, 0x1915: 0x0008, 0x1916: 0x0008, 0x1917: 0x0040,
+ 0x1918: 0x0008, 0x1919: 0x0008, 0x191a: 0x0008, 0x191b: 0x0008, 0x191c: 0x0008, 0x191d: 0x0008,
+ 0x191e: 0x0008, 0x191f: 0x0008, 0x1920: 0x0008, 0x1921: 0x0008, 0x1922: 0x0008, 0x1923: 0x0008,
+ 0x1924: 0x0008, 0x1925: 0x0008, 0x1926: 0x0008, 0x1927: 0x0008, 0x1928: 0x0008, 0x1929: 0x0008,
+ 0x192a: 0x0008, 0x192b: 0x0008, 0x192c: 0x0008, 0x192d: 0x0008, 0x192e: 0x0008, 0x192f: 0x0008,
+ 0x1930: 0x3008, 0x1931: 0x3008, 0x1932: 0x3008, 0x1933: 0x3008, 0x1934: 0x3008, 0x1935: 0x3008,
+ 0x1936: 0x0040, 0x1937: 0x3008, 0x1938: 0x3008, 0x1939: 0x0040, 0x193a: 0x0040, 0x193b: 0x3308,
+ 0x193c: 0x3308, 0x193d: 0x3808, 0x193e: 0x3b08, 0x193f: 0x0008,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0019, 0x1941: 0x02e9, 0x1942: 0x03d9, 0x1943: 0x02f1, 0x1944: 0x02f9, 0x1945: 0x03f1,
+ 0x1946: 0x0309, 0x1947: 0x00a9, 0x1948: 0x0311, 0x1949: 0x00b1, 0x194a: 0x0319, 0x194b: 0x0101,
+ 0x194c: 0x0321, 0x194d: 0x0329, 0x194e: 0x0051, 0x194f: 0x0339, 0x1950: 0x0751, 0x1951: 0x00b9,
+ 0x1952: 0x0089, 0x1953: 0x0341, 0x1954: 0x0349, 0x1955: 0x0391, 0x1956: 0x00c1, 0x1957: 0x0109,
+ 0x1958: 0x00c9, 0x1959: 0x04b1, 0x195a: 0x0019, 0x195b: 0x02e9, 0x195c: 0x03d9, 0x195d: 0x02f1,
+ 0x195e: 0x02f9, 0x195f: 0x03f1, 0x1960: 0x0309, 0x1961: 0x00a9, 0x1962: 0x0311, 0x1963: 0x00b1,
+ 0x1964: 0x0319, 0x1965: 0x0101, 0x1966: 0x0321, 0x1967: 0x0329, 0x1968: 0x0051, 0x1969: 0x0339,
+ 0x196a: 0x0751, 0x196b: 0x00b9, 0x196c: 0x0089, 0x196d: 0x0341, 0x196e: 0x0349, 0x196f: 0x0391,
+ 0x1970: 0x00c1, 0x1971: 0x0109, 0x1972: 0x00c9, 0x1973: 0x04b1, 0x1974: 0x0019, 0x1975: 0x02e9,
+ 0x1976: 0x03d9, 0x1977: 0x02f1, 0x1978: 0x02f9, 0x1979: 0x03f1, 0x197a: 0x0309, 0x197b: 0x00a9,
+ 0x197c: 0x0311, 0x197d: 0x00b1, 0x197e: 0x0319, 0x197f: 0x0101,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0321, 0x1981: 0x0329, 0x1982: 0x0051, 0x1983: 0x0339, 0x1984: 0x0751, 0x1985: 0x00b9,
+ 0x1986: 0x0089, 0x1987: 0x0341, 0x1988: 0x0349, 0x1989: 0x0391, 0x198a: 0x00c1, 0x198b: 0x0109,
+ 0x198c: 0x00c9, 0x198d: 0x04b1, 0x198e: 0x0019, 0x198f: 0x02e9, 0x1990: 0x03d9, 0x1991: 0x02f1,
+ 0x1992: 0x02f9, 0x1993: 0x03f1, 0x1994: 0x0309, 0x1995: 0x0040, 0x1996: 0x0311, 0x1997: 0x00b1,
+ 0x1998: 0x0319, 0x1999: 0x0101, 0x199a: 0x0321, 0x199b: 0x0329, 0x199c: 0x0051, 0x199d: 0x0339,
+ 0x199e: 0x0751, 0x199f: 0x00b9, 0x19a0: 0x0089, 0x19a1: 0x0341, 0x19a2: 0x0349, 0x19a3: 0x0391,
+ 0x19a4: 0x00c1, 0x19a5: 0x0109, 0x19a6: 0x00c9, 0x19a7: 0x04b1, 0x19a8: 0x0019, 0x19a9: 0x02e9,
+ 0x19aa: 0x03d9, 0x19ab: 0x02f1, 0x19ac: 0x02f9, 0x19ad: 0x03f1, 0x19ae: 0x0309, 0x19af: 0x00a9,
+ 0x19b0: 0x0311, 0x19b1: 0x00b1, 0x19b2: 0x0319, 0x19b3: 0x0101, 0x19b4: 0x0321, 0x19b5: 0x0329,
+ 0x19b6: 0x0051, 0x19b7: 0x0339, 0x19b8: 0x0751, 0x19b9: 0x00b9, 0x19ba: 0x0089, 0x19bb: 0x0341,
+ 0x19bc: 0x0349, 0x19bd: 0x0391, 0x19be: 0x00c1, 0x19bf: 0x0109,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x00c9, 0x19c1: 0x04b1, 0x19c2: 0x0019, 0x19c3: 0x02e9, 0x19c4: 0x03d9, 0x19c5: 0x02f1,
+ 0x19c6: 0x02f9, 0x19c7: 0x03f1, 0x19c8: 0x0309, 0x19c9: 0x00a9, 0x19ca: 0x0311, 0x19cb: 0x00b1,
+ 0x19cc: 0x0319, 0x19cd: 0x0101, 0x19ce: 0x0321, 0x19cf: 0x0329, 0x19d0: 0x0051, 0x19d1: 0x0339,
+ 0x19d2: 0x0751, 0x19d3: 0x00b9, 0x19d4: 0x0089, 0x19d5: 0x0341, 0x19d6: 0x0349, 0x19d7: 0x0391,
+ 0x19d8: 0x00c1, 0x19d9: 0x0109, 0x19da: 0x00c9, 0x19db: 0x04b1, 0x19dc: 0x0019, 0x19dd: 0x0040,
+ 0x19de: 0x03d9, 0x19df: 0x02f1, 0x19e0: 0x0040, 0x19e1: 0x0040, 0x19e2: 0x0309, 0x19e3: 0x0040,
+ 0x19e4: 0x0040, 0x19e5: 0x00b1, 0x19e6: 0x0319, 0x19e7: 0x0040, 0x19e8: 0x0040, 0x19e9: 0x0329,
+ 0x19ea: 0x0051, 0x19eb: 0x0339, 0x19ec: 0x0751, 0x19ed: 0x0040, 0x19ee: 0x0089, 0x19ef: 0x0341,
+ 0x19f0: 0x0349, 0x19f1: 0x0391, 0x19f2: 0x00c1, 0x19f3: 0x0109, 0x19f4: 0x00c9, 0x19f5: 0x04b1,
+ 0x19f6: 0x0019, 0x19f7: 0x02e9, 0x19f8: 0x03d9, 0x19f9: 0x02f1, 0x19fa: 0x0040, 0x19fb: 0x03f1,
+ 0x19fc: 0x0040, 0x19fd: 0x00a9, 0x19fe: 0x0311, 0x19ff: 0x00b1,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0319, 0x1a01: 0x0101, 0x1a02: 0x0321, 0x1a03: 0x0329, 0x1a04: 0x0040, 0x1a05: 0x0339,
+ 0x1a06: 0x0751, 0x1a07: 0x00b9, 0x1a08: 0x0089, 0x1a09: 0x0341, 0x1a0a: 0x0349, 0x1a0b: 0x0391,
+ 0x1a0c: 0x00c1, 0x1a0d: 0x0109, 0x1a0e: 0x00c9, 0x1a0f: 0x04b1, 0x1a10: 0x0019, 0x1a11: 0x02e9,
+ 0x1a12: 0x03d9, 0x1a13: 0x02f1, 0x1a14: 0x02f9, 0x1a15: 0x03f1, 0x1a16: 0x0309, 0x1a17: 0x00a9,
+ 0x1a18: 0x0311, 0x1a19: 0x00b1, 0x1a1a: 0x0319, 0x1a1b: 0x0101, 0x1a1c: 0x0321, 0x1a1d: 0x0329,
+ 0x1a1e: 0x0051, 0x1a1f: 0x0339, 0x1a20: 0x0751, 0x1a21: 0x00b9, 0x1a22: 0x0089, 0x1a23: 0x0341,
+ 0x1a24: 0x0349, 0x1a25: 0x0391, 0x1a26: 0x00c1, 0x1a27: 0x0109, 0x1a28: 0x00c9, 0x1a29: 0x04b1,
+ 0x1a2a: 0x0019, 0x1a2b: 0x02e9, 0x1a2c: 0x03d9, 0x1a2d: 0x02f1, 0x1a2e: 0x02f9, 0x1a2f: 0x03f1,
+ 0x1a30: 0x0309, 0x1a31: 0x00a9, 0x1a32: 0x0311, 0x1a33: 0x00b1, 0x1a34: 0x0319, 0x1a35: 0x0101,
+ 0x1a36: 0x0321, 0x1a37: 0x0329, 0x1a38: 0x0051, 0x1a39: 0x0339, 0x1a3a: 0x0751, 0x1a3b: 0x00b9,
+ 0x1a3c: 0x0089, 0x1a3d: 0x0341, 0x1a3e: 0x0349, 0x1a3f: 0x0391,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x00c1, 0x1a41: 0x0109, 0x1a42: 0x00c9, 0x1a43: 0x04b1, 0x1a44: 0x0019, 0x1a45: 0x02e9,
+ 0x1a46: 0x0040, 0x1a47: 0x02f1, 0x1a48: 0x02f9, 0x1a49: 0x03f1, 0x1a4a: 0x0309, 0x1a4b: 0x0040,
+ 0x1a4c: 0x0040, 0x1a4d: 0x00b1, 0x1a4e: 0x0319, 0x1a4f: 0x0101, 0x1a50: 0x0321, 0x1a51: 0x0329,
+ 0x1a52: 0x0051, 0x1a53: 0x0339, 0x1a54: 0x0751, 0x1a55: 0x0040, 0x1a56: 0x0089, 0x1a57: 0x0341,
+ 0x1a58: 0x0349, 0x1a59: 0x0391, 0x1a5a: 0x00c1, 0x1a5b: 0x0109, 0x1a5c: 0x00c9, 0x1a5d: 0x0040,
+ 0x1a5e: 0x0019, 0x1a5f: 0x02e9, 0x1a60: 0x03d9, 0x1a61: 0x02f1, 0x1a62: 0x02f9, 0x1a63: 0x03f1,
+ 0x1a64: 0x0309, 0x1a65: 0x00a9, 0x1a66: 0x0311, 0x1a67: 0x00b1, 0x1a68: 0x0319, 0x1a69: 0x0101,
+ 0x1a6a: 0x0321, 0x1a6b: 0x0329, 0x1a6c: 0x0051, 0x1a6d: 0x0339, 0x1a6e: 0x0751, 0x1a6f: 0x00b9,
+ 0x1a70: 0x0089, 0x1a71: 0x0341, 0x1a72: 0x0349, 0x1a73: 0x0391, 0x1a74: 0x00c1, 0x1a75: 0x0109,
+ 0x1a76: 0x00c9, 0x1a77: 0x04b1, 0x1a78: 0x0019, 0x1a79: 0x02e9, 0x1a7a: 0x0040, 0x1a7b: 0x02f1,
+ 0x1a7c: 0x02f9, 0x1a7d: 0x03f1, 0x1a7e: 0x0309, 0x1a7f: 0x0040,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0311, 0x1a81: 0x00b1, 0x1a82: 0x0319, 0x1a83: 0x0101, 0x1a84: 0x0321, 0x1a85: 0x0040,
+ 0x1a86: 0x0051, 0x1a87: 0x0040, 0x1a88: 0x0040, 0x1a89: 0x0040, 0x1a8a: 0x0089, 0x1a8b: 0x0341,
+ 0x1a8c: 0x0349, 0x1a8d: 0x0391, 0x1a8e: 0x00c1, 0x1a8f: 0x0109, 0x1a90: 0x00c9, 0x1a91: 0x0040,
+ 0x1a92: 0x0019, 0x1a93: 0x02e9, 0x1a94: 0x03d9, 0x1a95: 0x02f1, 0x1a96: 0x02f9, 0x1a97: 0x03f1,
+ 0x1a98: 0x0309, 0x1a99: 0x00a9, 0x1a9a: 0x0311, 0x1a9b: 0x00b1, 0x1a9c: 0x0319, 0x1a9d: 0x0101,
+ 0x1a9e: 0x0321, 0x1a9f: 0x0329, 0x1aa0: 0x0051, 0x1aa1: 0x0339, 0x1aa2: 0x0751, 0x1aa3: 0x00b9,
+ 0x1aa4: 0x0089, 0x1aa5: 0x0341, 0x1aa6: 0x0349, 0x1aa7: 0x0391, 0x1aa8: 0x00c1, 0x1aa9: 0x0109,
+ 0x1aaa: 0x00c9, 0x1aab: 0x04b1, 0x1aac: 0x0019, 0x1aad: 0x02e9, 0x1aae: 0x03d9, 0x1aaf: 0x02f1,
+ 0x1ab0: 0x02f9, 0x1ab1: 0x03f1, 0x1ab2: 0x0309, 0x1ab3: 0x00a9, 0x1ab4: 0x0311, 0x1ab5: 0x00b1,
+ 0x1ab6: 0x0319, 0x1ab7: 0x0101, 0x1ab8: 0x0321, 0x1ab9: 0x0329, 0x1aba: 0x0051, 0x1abb: 0x0339,
+ 0x1abc: 0x0751, 0x1abd: 0x00b9, 0x1abe: 0x0089, 0x1abf: 0x0341,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x0349, 0x1ac1: 0x0391, 0x1ac2: 0x00c1, 0x1ac3: 0x0109, 0x1ac4: 0x00c9, 0x1ac5: 0x04b1,
+ 0x1ac6: 0x0019, 0x1ac7: 0x02e9, 0x1ac8: 0x03d9, 0x1ac9: 0x02f1, 0x1aca: 0x02f9, 0x1acb: 0x03f1,
+ 0x1acc: 0x0309, 0x1acd: 0x00a9, 0x1ace: 0x0311, 0x1acf: 0x00b1, 0x1ad0: 0x0319, 0x1ad1: 0x0101,
+ 0x1ad2: 0x0321, 0x1ad3: 0x0329, 0x1ad4: 0x0051, 0x1ad5: 0x0339, 0x1ad6: 0x0751, 0x1ad7: 0x00b9,
+ 0x1ad8: 0x0089, 0x1ad9: 0x0341, 0x1ada: 0x0349, 0x1adb: 0x0391, 0x1adc: 0x00c1, 0x1add: 0x0109,
+ 0x1ade: 0x00c9, 0x1adf: 0x04b1, 0x1ae0: 0x0019, 0x1ae1: 0x02e9, 0x1ae2: 0x03d9, 0x1ae3: 0x02f1,
+ 0x1ae4: 0x02f9, 0x1ae5: 0x03f1, 0x1ae6: 0x0309, 0x1ae7: 0x00a9, 0x1ae8: 0x0311, 0x1ae9: 0x00b1,
+ 0x1aea: 0x0319, 0x1aeb: 0x0101, 0x1aec: 0x0321, 0x1aed: 0x0329, 0x1aee: 0x0051, 0x1aef: 0x0339,
+ 0x1af0: 0x0751, 0x1af1: 0x00b9, 0x1af2: 0x0089, 0x1af3: 0x0341, 0x1af4: 0x0349, 0x1af5: 0x0391,
+ 0x1af6: 0x00c1, 0x1af7: 0x0109, 0x1af8: 0x00c9, 0x1af9: 0x04b1, 0x1afa: 0x0019, 0x1afb: 0x02e9,
+ 0x1afc: 0x03d9, 0x1afd: 0x02f1, 0x1afe: 0x02f9, 0x1aff: 0x03f1,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x0309, 0x1b01: 0x00a9, 0x1b02: 0x0311, 0x1b03: 0x00b1, 0x1b04: 0x0319, 0x1b05: 0x0101,
+ 0x1b06: 0x0321, 0x1b07: 0x0329, 0x1b08: 0x0051, 0x1b09: 0x0339, 0x1b0a: 0x0751, 0x1b0b: 0x00b9,
+ 0x1b0c: 0x0089, 0x1b0d: 0x0341, 0x1b0e: 0x0349, 0x1b0f: 0x0391, 0x1b10: 0x00c1, 0x1b11: 0x0109,
+ 0x1b12: 0x00c9, 0x1b13: 0x04b1, 0x1b14: 0x0019, 0x1b15: 0x02e9, 0x1b16: 0x03d9, 0x1b17: 0x02f1,
+ 0x1b18: 0x02f9, 0x1b19: 0x03f1, 0x1b1a: 0x0309, 0x1b1b: 0x00a9, 0x1b1c: 0x0311, 0x1b1d: 0x00b1,
+ 0x1b1e: 0x0319, 0x1b1f: 0x0101, 0x1b20: 0x0321, 0x1b21: 0x0329, 0x1b22: 0x0051, 0x1b23: 0x0339,
+ 0x1b24: 0x0751, 0x1b25: 0x00b9, 0x1b26: 0x0089, 0x1b27: 0x0341, 0x1b28: 0x0349, 0x1b29: 0x0391,
+ 0x1b2a: 0x00c1, 0x1b2b: 0x0109, 0x1b2c: 0x00c9, 0x1b2d: 0x04b1, 0x1b2e: 0x0019, 0x1b2f: 0x02e9,
+ 0x1b30: 0x03d9, 0x1b31: 0x02f1, 0x1b32: 0x02f9, 0x1b33: 0x03f1, 0x1b34: 0x0309, 0x1b35: 0x00a9,
+ 0x1b36: 0x0311, 0x1b37: 0x00b1, 0x1b38: 0x0319, 0x1b39: 0x0101, 0x1b3a: 0x0321, 0x1b3b: 0x0329,
+ 0x1b3c: 0x0051, 0x1b3d: 0x0339, 0x1b3e: 0x0751, 0x1b3f: 0x00b9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0x0089, 0x1b41: 0x0341, 0x1b42: 0x0349, 0x1b43: 0x0391, 0x1b44: 0x00c1, 0x1b45: 0x0109,
+ 0x1b46: 0x00c9, 0x1b47: 0x04b1, 0x1b48: 0x0019, 0x1b49: 0x02e9, 0x1b4a: 0x03d9, 0x1b4b: 0x02f1,
+ 0x1b4c: 0x02f9, 0x1b4d: 0x03f1, 0x1b4e: 0x0309, 0x1b4f: 0x00a9, 0x1b50: 0x0311, 0x1b51: 0x00b1,
+ 0x1b52: 0x0319, 0x1b53: 0x0101, 0x1b54: 0x0321, 0x1b55: 0x0329, 0x1b56: 0x0051, 0x1b57: 0x0339,
+ 0x1b58: 0x0751, 0x1b59: 0x00b9, 0x1b5a: 0x0089, 0x1b5b: 0x0341, 0x1b5c: 0x0349, 0x1b5d: 0x0391,
+ 0x1b5e: 0x00c1, 0x1b5f: 0x0109, 0x1b60: 0x00c9, 0x1b61: 0x04b1, 0x1b62: 0x0019, 0x1b63: 0x02e9,
+ 0x1b64: 0x03d9, 0x1b65: 0x02f1, 0x1b66: 0x02f9, 0x1b67: 0x03f1, 0x1b68: 0x0309, 0x1b69: 0x00a9,
+ 0x1b6a: 0x0311, 0x1b6b: 0x00b1, 0x1b6c: 0x0319, 0x1b6d: 0x0101, 0x1b6e: 0x0321, 0x1b6f: 0x0329,
+ 0x1b70: 0x0051, 0x1b71: 0x0339, 0x1b72: 0x0751, 0x1b73: 0x00b9, 0x1b74: 0x0089, 0x1b75: 0x0341,
+ 0x1b76: 0x0349, 0x1b77: 0x0391, 0x1b78: 0x00c1, 0x1b79: 0x0109, 0x1b7a: 0x00c9, 0x1b7b: 0x04b1,
+ 0x1b7c: 0x0019, 0x1b7d: 0x02e9, 0x1b7e: 0x03d9, 0x1b7f: 0x02f1,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x02f9, 0x1b81: 0x03f1, 0x1b82: 0x0309, 0x1b83: 0x00a9, 0x1b84: 0x0311, 0x1b85: 0x00b1,
+ 0x1b86: 0x0319, 0x1b87: 0x0101, 0x1b88: 0x0321, 0x1b89: 0x0329, 0x1b8a: 0x0051, 0x1b8b: 0x0339,
+ 0x1b8c: 0x0751, 0x1b8d: 0x00b9, 0x1b8e: 0x0089, 0x1b8f: 0x0341, 0x1b90: 0x0349, 0x1b91: 0x0391,
+ 0x1b92: 0x00c1, 0x1b93: 0x0109, 0x1b94: 0x00c9, 0x1b95: 0x04b1, 0x1b96: 0x0019, 0x1b97: 0x02e9,
+ 0x1b98: 0x03d9, 0x1b99: 0x02f1, 0x1b9a: 0x02f9, 0x1b9b: 0x03f1, 0x1b9c: 0x0309, 0x1b9d: 0x00a9,
+ 0x1b9e: 0x0311, 0x1b9f: 0x00b1, 0x1ba0: 0x0319, 0x1ba1: 0x0101, 0x1ba2: 0x0321, 0x1ba3: 0x0329,
+ 0x1ba4: 0x0051, 0x1ba5: 0x0339, 0x1ba6: 0x0751, 0x1ba7: 0x00b9, 0x1ba8: 0x0089, 0x1ba9: 0x0341,
+ 0x1baa: 0x0349, 0x1bab: 0x0391, 0x1bac: 0x00c1, 0x1bad: 0x0109, 0x1bae: 0x00c9, 0x1baf: 0x04b1,
+ 0x1bb0: 0x0019, 0x1bb1: 0x02e9, 0x1bb2: 0x03d9, 0x1bb3: 0x02f1, 0x1bb4: 0x02f9, 0x1bb5: 0x03f1,
+ 0x1bb6: 0x0309, 0x1bb7: 0x00a9, 0x1bb8: 0x0311, 0x1bb9: 0x00b1, 0x1bba: 0x0319, 0x1bbb: 0x0101,
+ 0x1bbc: 0x0321, 0x1bbd: 0x0329, 0x1bbe: 0x0051, 0x1bbf: 0x0339,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x0751, 0x1bc1: 0x00b9, 0x1bc2: 0x0089, 0x1bc3: 0x0341, 0x1bc4: 0x0349, 0x1bc5: 0x0391,
+ 0x1bc6: 0x00c1, 0x1bc7: 0x0109, 0x1bc8: 0x00c9, 0x1bc9: 0x04b1, 0x1bca: 0x0019, 0x1bcb: 0x02e9,
+ 0x1bcc: 0x03d9, 0x1bcd: 0x02f1, 0x1bce: 0x02f9, 0x1bcf: 0x03f1, 0x1bd0: 0x0309, 0x1bd1: 0x00a9,
+ 0x1bd2: 0x0311, 0x1bd3: 0x00b1, 0x1bd4: 0x0319, 0x1bd5: 0x0101, 0x1bd6: 0x0321, 0x1bd7: 0x0329,
+ 0x1bd8: 0x0051, 0x1bd9: 0x0339, 0x1bda: 0x0751, 0x1bdb: 0x00b9, 0x1bdc: 0x0089, 0x1bdd: 0x0341,
+ 0x1bde: 0x0349, 0x1bdf: 0x0391, 0x1be0: 0x00c1, 0x1be1: 0x0109, 0x1be2: 0x00c9, 0x1be3: 0x04b1,
+ 0x1be4: 0x23e1, 0x1be5: 0x23e9, 0x1be6: 0x0040, 0x1be7: 0x0040, 0x1be8: 0x23f1, 0x1be9: 0x0399,
+ 0x1bea: 0x03a1, 0x1beb: 0x03a9, 0x1bec: 0x23f9, 0x1bed: 0x2401, 0x1bee: 0x2409, 0x1bef: 0x04d1,
+ 0x1bf0: 0x05f9, 0x1bf1: 0x2411, 0x1bf2: 0x2419, 0x1bf3: 0x2421, 0x1bf4: 0x2429, 0x1bf5: 0x2431,
+ 0x1bf6: 0x2439, 0x1bf7: 0x0799, 0x1bf8: 0x03c1, 0x1bf9: 0x04d1, 0x1bfa: 0x2441, 0x1bfb: 0x2449,
+ 0x1bfc: 0x2451, 0x1bfd: 0x03b1, 0x1bfe: 0x03b9, 0x1bff: 0x2459,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x0769, 0x1c01: 0x2461, 0x1c02: 0x23f1, 0x1c03: 0x0399, 0x1c04: 0x03a1, 0x1c05: 0x03a9,
+ 0x1c06: 0x23f9, 0x1c07: 0x2401, 0x1c08: 0x2409, 0x1c09: 0x04d1, 0x1c0a: 0x05f9, 0x1c0b: 0x2411,
+ 0x1c0c: 0x2419, 0x1c0d: 0x2421, 0x1c0e: 0x2429, 0x1c0f: 0x2431, 0x1c10: 0x2439, 0x1c11: 0x0799,
+ 0x1c12: 0x03c1, 0x1c13: 0x2441, 0x1c14: 0x2441, 0x1c15: 0x2449, 0x1c16: 0x2451, 0x1c17: 0x03b1,
+ 0x1c18: 0x03b9, 0x1c19: 0x2459, 0x1c1a: 0x0769, 0x1c1b: 0x2469, 0x1c1c: 0x23f9, 0x1c1d: 0x04d1,
+ 0x1c1e: 0x2411, 0x1c1f: 0x03b1, 0x1c20: 0x03c1, 0x1c21: 0x0799, 0x1c22: 0x23f1, 0x1c23: 0x0399,
+ 0x1c24: 0x03a1, 0x1c25: 0x03a9, 0x1c26: 0x23f9, 0x1c27: 0x2401, 0x1c28: 0x2409, 0x1c29: 0x04d1,
+ 0x1c2a: 0x05f9, 0x1c2b: 0x2411, 0x1c2c: 0x2419, 0x1c2d: 0x2421, 0x1c2e: 0x2429, 0x1c2f: 0x2431,
+ 0x1c30: 0x2439, 0x1c31: 0x0799, 0x1c32: 0x03c1, 0x1c33: 0x04d1, 0x1c34: 0x2441, 0x1c35: 0x2449,
+ 0x1c36: 0x2451, 0x1c37: 0x03b1, 0x1c38: 0x03b9, 0x1c39: 0x2459, 0x1c3a: 0x0769, 0x1c3b: 0x2461,
+ 0x1c3c: 0x23f1, 0x1c3d: 0x0399, 0x1c3e: 0x03a1, 0x1c3f: 0x03a9,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0x23f9, 0x1c41: 0x2401, 0x1c42: 0x2409, 0x1c43: 0x04d1, 0x1c44: 0x05f9, 0x1c45: 0x2411,
+ 0x1c46: 0x2419, 0x1c47: 0x2421, 0x1c48: 0x2429, 0x1c49: 0x2431, 0x1c4a: 0x2439, 0x1c4b: 0x0799,
+ 0x1c4c: 0x03c1, 0x1c4d: 0x2441, 0x1c4e: 0x2441, 0x1c4f: 0x2449, 0x1c50: 0x2451, 0x1c51: 0x03b1,
+ 0x1c52: 0x03b9, 0x1c53: 0x2459, 0x1c54: 0x0769, 0x1c55: 0x2469, 0x1c56: 0x23f9, 0x1c57: 0x04d1,
+ 0x1c58: 0x2411, 0x1c59: 0x03b1, 0x1c5a: 0x03c1, 0x1c5b: 0x0799, 0x1c5c: 0x23f1, 0x1c5d: 0x0399,
+ 0x1c5e: 0x03a1, 0x1c5f: 0x03a9, 0x1c60: 0x23f9, 0x1c61: 0x2401, 0x1c62: 0x2409, 0x1c63: 0x04d1,
+ 0x1c64: 0x05f9, 0x1c65: 0x2411, 0x1c66: 0x2419, 0x1c67: 0x2421, 0x1c68: 0x2429, 0x1c69: 0x2431,
+ 0x1c6a: 0x2439, 0x1c6b: 0x0799, 0x1c6c: 0x03c1, 0x1c6d: 0x04d1, 0x1c6e: 0x2441, 0x1c6f: 0x2449,
+ 0x1c70: 0x2451, 0x1c71: 0x03b1, 0x1c72: 0x03b9, 0x1c73: 0x2459, 0x1c74: 0x0769, 0x1c75: 0x2461,
+ 0x1c76: 0x23f1, 0x1c77: 0x0399, 0x1c78: 0x03a1, 0x1c79: 0x03a9, 0x1c7a: 0x23f9, 0x1c7b: 0x2401,
+ 0x1c7c: 0x2409, 0x1c7d: 0x04d1, 0x1c7e: 0x05f9, 0x1c7f: 0x2411,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x2419, 0x1c81: 0x2421, 0x1c82: 0x2429, 0x1c83: 0x2431, 0x1c84: 0x2439, 0x1c85: 0x0799,
+ 0x1c86: 0x03c1, 0x1c87: 0x2441, 0x1c88: 0x2441, 0x1c89: 0x2449, 0x1c8a: 0x2451, 0x1c8b: 0x03b1,
+ 0x1c8c: 0x03b9, 0x1c8d: 0x2459, 0x1c8e: 0x0769, 0x1c8f: 0x2469, 0x1c90: 0x23f9, 0x1c91: 0x04d1,
+ 0x1c92: 0x2411, 0x1c93: 0x03b1, 0x1c94: 0x03c1, 0x1c95: 0x0799, 0x1c96: 0x23f1, 0x1c97: 0x0399,
+ 0x1c98: 0x03a1, 0x1c99: 0x03a9, 0x1c9a: 0x23f9, 0x1c9b: 0x2401, 0x1c9c: 0x2409, 0x1c9d: 0x04d1,
+ 0x1c9e: 0x05f9, 0x1c9f: 0x2411, 0x1ca0: 0x2419, 0x1ca1: 0x2421, 0x1ca2: 0x2429, 0x1ca3: 0x2431,
+ 0x1ca4: 0x2439, 0x1ca5: 0x0799, 0x1ca6: 0x03c1, 0x1ca7: 0x04d1, 0x1ca8: 0x2441, 0x1ca9: 0x2449,
+ 0x1caa: 0x2451, 0x1cab: 0x03b1, 0x1cac: 0x03b9, 0x1cad: 0x2459, 0x1cae: 0x0769, 0x1caf: 0x2461,
+ 0x1cb0: 0x23f1, 0x1cb1: 0x0399, 0x1cb2: 0x03a1, 0x1cb3: 0x03a9, 0x1cb4: 0x23f9, 0x1cb5: 0x2401,
+ 0x1cb6: 0x2409, 0x1cb7: 0x04d1, 0x1cb8: 0x05f9, 0x1cb9: 0x2411, 0x1cba: 0x2419, 0x1cbb: 0x2421,
+ 0x1cbc: 0x2429, 0x1cbd: 0x2431, 0x1cbe: 0x2439, 0x1cbf: 0x0799,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x03c1, 0x1cc1: 0x2441, 0x1cc2: 0x2441, 0x1cc3: 0x2449, 0x1cc4: 0x2451, 0x1cc5: 0x03b1,
+ 0x1cc6: 0x03b9, 0x1cc7: 0x2459, 0x1cc8: 0x0769, 0x1cc9: 0x2469, 0x1cca: 0x23f9, 0x1ccb: 0x04d1,
+ 0x1ccc: 0x2411, 0x1ccd: 0x03b1, 0x1cce: 0x03c1, 0x1ccf: 0x0799, 0x1cd0: 0x23f1, 0x1cd1: 0x0399,
+ 0x1cd2: 0x03a1, 0x1cd3: 0x03a9, 0x1cd4: 0x23f9, 0x1cd5: 0x2401, 0x1cd6: 0x2409, 0x1cd7: 0x04d1,
+ 0x1cd8: 0x05f9, 0x1cd9: 0x2411, 0x1cda: 0x2419, 0x1cdb: 0x2421, 0x1cdc: 0x2429, 0x1cdd: 0x2431,
+ 0x1cde: 0x2439, 0x1cdf: 0x0799, 0x1ce0: 0x03c1, 0x1ce1: 0x04d1, 0x1ce2: 0x2441, 0x1ce3: 0x2449,
+ 0x1ce4: 0x2451, 0x1ce5: 0x03b1, 0x1ce6: 0x03b9, 0x1ce7: 0x2459, 0x1ce8: 0x0769, 0x1ce9: 0x2461,
+ 0x1cea: 0x23f1, 0x1ceb: 0x0399, 0x1cec: 0x03a1, 0x1ced: 0x03a9, 0x1cee: 0x23f9, 0x1cef: 0x2401,
+ 0x1cf0: 0x2409, 0x1cf1: 0x04d1, 0x1cf2: 0x05f9, 0x1cf3: 0x2411, 0x1cf4: 0x2419, 0x1cf5: 0x2421,
+ 0x1cf6: 0x2429, 0x1cf7: 0x2431, 0x1cf8: 0x2439, 0x1cf9: 0x0799, 0x1cfa: 0x03c1, 0x1cfb: 0x2441,
+ 0x1cfc: 0x2441, 0x1cfd: 0x2449, 0x1cfe: 0x2451, 0x1cff: 0x03b1,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0x03b9, 0x1d01: 0x2459, 0x1d02: 0x0769, 0x1d03: 0x2469, 0x1d04: 0x23f9, 0x1d05: 0x04d1,
+ 0x1d06: 0x2411, 0x1d07: 0x03b1, 0x1d08: 0x03c1, 0x1d09: 0x0799, 0x1d0a: 0x2471, 0x1d0b: 0x2471,
+ 0x1d0c: 0x0040, 0x1d0d: 0x0040, 0x1d0e: 0x06e1, 0x1d0f: 0x0049, 0x1d10: 0x0029, 0x1d11: 0x0031,
+ 0x1d12: 0x06e9, 0x1d13: 0x06f1, 0x1d14: 0x06f9, 0x1d15: 0x0701, 0x1d16: 0x0709, 0x1d17: 0x0711,
+ 0x1d18: 0x06e1, 0x1d19: 0x0049, 0x1d1a: 0x0029, 0x1d1b: 0x0031, 0x1d1c: 0x06e9, 0x1d1d: 0x06f1,
+ 0x1d1e: 0x06f9, 0x1d1f: 0x0701, 0x1d20: 0x0709, 0x1d21: 0x0711, 0x1d22: 0x06e1, 0x1d23: 0x0049,
+ 0x1d24: 0x0029, 0x1d25: 0x0031, 0x1d26: 0x06e9, 0x1d27: 0x06f1, 0x1d28: 0x06f9, 0x1d29: 0x0701,
+ 0x1d2a: 0x0709, 0x1d2b: 0x0711, 0x1d2c: 0x06e1, 0x1d2d: 0x0049, 0x1d2e: 0x0029, 0x1d2f: 0x0031,
+ 0x1d30: 0x06e9, 0x1d31: 0x06f1, 0x1d32: 0x06f9, 0x1d33: 0x0701, 0x1d34: 0x0709, 0x1d35: 0x0711,
+ 0x1d36: 0x06e1, 0x1d37: 0x0049, 0x1d38: 0x0029, 0x1d39: 0x0031, 0x1d3a: 0x06e9, 0x1d3b: 0x06f1,
+ 0x1d3c: 0x06f9, 0x1d3d: 0x0701, 0x1d3e: 0x0709, 0x1d3f: 0x0711,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x3308, 0x1d41: 0x3308, 0x1d42: 0x3308, 0x1d43: 0x3308, 0x1d44: 0x3308, 0x1d45: 0x3308,
+ 0x1d46: 0x3308, 0x1d47: 0x0040, 0x1d48: 0x3308, 0x1d49: 0x3308, 0x1d4a: 0x3308, 0x1d4b: 0x3308,
+ 0x1d4c: 0x3308, 0x1d4d: 0x3308, 0x1d4e: 0x3308, 0x1d4f: 0x3308, 0x1d50: 0x3308, 0x1d51: 0x3308,
+ 0x1d52: 0x3308, 0x1d53: 0x3308, 0x1d54: 0x3308, 0x1d55: 0x3308, 0x1d56: 0x3308, 0x1d57: 0x3308,
+ 0x1d58: 0x3308, 0x1d59: 0x0040, 0x1d5a: 0x0040, 0x1d5b: 0x3308, 0x1d5c: 0x3308, 0x1d5d: 0x3308,
+ 0x1d5e: 0x3308, 0x1d5f: 0x3308, 0x1d60: 0x3308, 0x1d61: 0x3308, 0x1d62: 0x0040, 0x1d63: 0x3308,
+ 0x1d64: 0x3308, 0x1d65: 0x0040, 0x1d66: 0x3308, 0x1d67: 0x3308, 0x1d68: 0x3308, 0x1d69: 0x3308,
+ 0x1d6a: 0x3308, 0x1d6b: 0x0040, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040,
+ 0x1d70: 0x2479, 0x1d71: 0x2481, 0x1d72: 0x02a9, 0x1d73: 0x2489, 0x1d74: 0x02b1, 0x1d75: 0x2491,
+ 0x1d76: 0x2499, 0x1d77: 0x24a1, 0x1d78: 0x24a9, 0x1d79: 0x24b1, 0x1d7a: 0x24b9, 0x1d7b: 0x24c1,
+ 0x1d7c: 0x02b9, 0x1d7d: 0x24c9, 0x1d7e: 0x24d1, 0x1d7f: 0x02c1,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x02c9, 0x1d81: 0x24d9, 0x1d82: 0x24e1, 0x1d83: 0x24e9, 0x1d84: 0x24f1, 0x1d85: 0x24f9,
+ 0x1d86: 0x2501, 0x1d87: 0x2509, 0x1d88: 0x2511, 0x1d89: 0x2519, 0x1d8a: 0x2521, 0x1d8b: 0x2529,
+ 0x1d8c: 0x2531, 0x1d8d: 0x2539, 0x1d8e: 0x2541, 0x1d8f: 0x2549, 0x1d90: 0x2551, 0x1d91: 0x2479,
+ 0x1d92: 0x2481, 0x1d93: 0x02a9, 0x1d94: 0x2489, 0x1d95: 0x02b1, 0x1d96: 0x2491, 0x1d97: 0x2499,
+ 0x1d98: 0x24a1, 0x1d99: 0x24a9, 0x1d9a: 0x24b1, 0x1d9b: 0x24b9, 0x1d9c: 0x02b9, 0x1d9d: 0x24c9,
+ 0x1d9e: 0x02c1, 0x1d9f: 0x24d9, 0x1da0: 0x24e1, 0x1da1: 0x24e9, 0x1da2: 0x24f1, 0x1da3: 0x24f9,
+ 0x1da4: 0x2501, 0x1da5: 0x02d1, 0x1da6: 0x2509, 0x1da7: 0x2559, 0x1da8: 0x2531, 0x1da9: 0x2561,
+ 0x1daa: 0x2569, 0x1dab: 0x2571, 0x1dac: 0x2579, 0x1dad: 0x2581, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0040, 0x1db1: 0x0040, 0x1db2: 0x0040, 0x1db3: 0x0040, 0x1db4: 0x0040, 0x1db5: 0x0040,
+ 0x1db6: 0x0040, 0x1db7: 0x0040, 0x1db8: 0x0040, 0x1db9: 0x0040, 0x1dba: 0x0040, 0x1dbb: 0x0040,
+ 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xe115, 0x1dc1: 0xe115, 0x1dc2: 0xe135, 0x1dc3: 0xe135, 0x1dc4: 0xe115, 0x1dc5: 0xe115,
+ 0x1dc6: 0xe175, 0x1dc7: 0xe175, 0x1dc8: 0xe115, 0x1dc9: 0xe115, 0x1dca: 0xe135, 0x1dcb: 0xe135,
+ 0x1dcc: 0xe115, 0x1dcd: 0xe115, 0x1dce: 0xe1f5, 0x1dcf: 0xe1f5, 0x1dd0: 0xe115, 0x1dd1: 0xe115,
+ 0x1dd2: 0xe135, 0x1dd3: 0xe135, 0x1dd4: 0xe115, 0x1dd5: 0xe115, 0x1dd6: 0xe175, 0x1dd7: 0xe175,
+ 0x1dd8: 0xe115, 0x1dd9: 0xe115, 0x1dda: 0xe135, 0x1ddb: 0xe135, 0x1ddc: 0xe115, 0x1ddd: 0xe115,
+ 0x1dde: 0x8ca5, 0x1ddf: 0x8ca5, 0x1de0: 0x04b5, 0x1de1: 0x04b5, 0x1de2: 0x0a08, 0x1de3: 0x0a08,
+ 0x1de4: 0x0a08, 0x1de5: 0x0a08, 0x1de6: 0x0a08, 0x1de7: 0x0a08, 0x1de8: 0x0a08, 0x1de9: 0x0a08,
+ 0x1dea: 0x0a08, 0x1deb: 0x0a08, 0x1dec: 0x0a08, 0x1ded: 0x0a08, 0x1dee: 0x0a08, 0x1def: 0x0a08,
+ 0x1df0: 0x0a08, 0x1df1: 0x0a08, 0x1df2: 0x0a08, 0x1df3: 0x0a08, 0x1df4: 0x0a08, 0x1df5: 0x0a08,
+ 0x1df6: 0x0a08, 0x1df7: 0x0a08, 0x1df8: 0x0a08, 0x1df9: 0x0a08, 0x1dfa: 0x0a08, 0x1dfb: 0x0a08,
+ 0x1dfc: 0x0a08, 0x1dfd: 0x0a08, 0x1dfe: 0x0a08, 0x1dff: 0x0a08,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0x20b1, 0x1e01: 0x20b9, 0x1e02: 0x20d9, 0x1e03: 0x20f1, 0x1e04: 0x0040, 0x1e05: 0x2189,
+ 0x1e06: 0x2109, 0x1e07: 0x20e1, 0x1e08: 0x2131, 0x1e09: 0x2191, 0x1e0a: 0x2161, 0x1e0b: 0x2169,
+ 0x1e0c: 0x2171, 0x1e0d: 0x2179, 0x1e0e: 0x2111, 0x1e0f: 0x2141, 0x1e10: 0x2151, 0x1e11: 0x2121,
+ 0x1e12: 0x2159, 0x1e13: 0x2101, 0x1e14: 0x2119, 0x1e15: 0x20c9, 0x1e16: 0x20d1, 0x1e17: 0x20e9,
+ 0x1e18: 0x20f9, 0x1e19: 0x2129, 0x1e1a: 0x2139, 0x1e1b: 0x2149, 0x1e1c: 0x2589, 0x1e1d: 0x1689,
+ 0x1e1e: 0x2591, 0x1e1f: 0x2599, 0x1e20: 0x0040, 0x1e21: 0x20b9, 0x1e22: 0x20d9, 0x1e23: 0x0040,
+ 0x1e24: 0x2181, 0x1e25: 0x0040, 0x1e26: 0x0040, 0x1e27: 0x20e1, 0x1e28: 0x0040, 0x1e29: 0x2191,
+ 0x1e2a: 0x2161, 0x1e2b: 0x2169, 0x1e2c: 0x2171, 0x1e2d: 0x2179, 0x1e2e: 0x2111, 0x1e2f: 0x2141,
+ 0x1e30: 0x2151, 0x1e31: 0x2121, 0x1e32: 0x2159, 0x1e33: 0x0040, 0x1e34: 0x2119, 0x1e35: 0x20c9,
+ 0x1e36: 0x20d1, 0x1e37: 0x20e9, 0x1e38: 0x0040, 0x1e39: 0x2129, 0x1e3a: 0x0040, 0x1e3b: 0x2149,
+ 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0x0040, 0x1e41: 0x0040, 0x1e42: 0x20d9, 0x1e43: 0x0040, 0x1e44: 0x0040, 0x1e45: 0x0040,
+ 0x1e46: 0x0040, 0x1e47: 0x20e1, 0x1e48: 0x0040, 0x1e49: 0x2191, 0x1e4a: 0x0040, 0x1e4b: 0x2169,
+ 0x1e4c: 0x0040, 0x1e4d: 0x2179, 0x1e4e: 0x2111, 0x1e4f: 0x2141, 0x1e50: 0x0040, 0x1e51: 0x2121,
+ 0x1e52: 0x2159, 0x1e53: 0x0040, 0x1e54: 0x2119, 0x1e55: 0x0040, 0x1e56: 0x0040, 0x1e57: 0x20e9,
+ 0x1e58: 0x0040, 0x1e59: 0x2129, 0x1e5a: 0x0040, 0x1e5b: 0x2149, 0x1e5c: 0x0040, 0x1e5d: 0x1689,
+ 0x1e5e: 0x0040, 0x1e5f: 0x2599, 0x1e60: 0x0040, 0x1e61: 0x20b9, 0x1e62: 0x20d9, 0x1e63: 0x0040,
+ 0x1e64: 0x2181, 0x1e65: 0x0040, 0x1e66: 0x0040, 0x1e67: 0x20e1, 0x1e68: 0x2131, 0x1e69: 0x2191,
+ 0x1e6a: 0x2161, 0x1e6b: 0x0040, 0x1e6c: 0x2171, 0x1e6d: 0x2179, 0x1e6e: 0x2111, 0x1e6f: 0x2141,
+ 0x1e70: 0x2151, 0x1e71: 0x2121, 0x1e72: 0x2159, 0x1e73: 0x0040, 0x1e74: 0x2119, 0x1e75: 0x20c9,
+ 0x1e76: 0x20d1, 0x1e77: 0x20e9, 0x1e78: 0x0040, 0x1e79: 0x2129, 0x1e7a: 0x2139, 0x1e7b: 0x2149,
+ 0x1e7c: 0x2589, 0x1e7d: 0x0040, 0x1e7e: 0x2591, 0x1e7f: 0x0040,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0x20b1, 0x1e81: 0x20b9, 0x1e82: 0x20d9, 0x1e83: 0x20f1, 0x1e84: 0x2181, 0x1e85: 0x2189,
+ 0x1e86: 0x2109, 0x1e87: 0x20e1, 0x1e88: 0x2131, 0x1e89: 0x2191, 0x1e8a: 0x0040, 0x1e8b: 0x2169,
+ 0x1e8c: 0x2171, 0x1e8d: 0x2179, 0x1e8e: 0x2111, 0x1e8f: 0x2141, 0x1e90: 0x2151, 0x1e91: 0x2121,
+ 0x1e92: 0x2159, 0x1e93: 0x2101, 0x1e94: 0x2119, 0x1e95: 0x20c9, 0x1e96: 0x20d1, 0x1e97: 0x20e9,
+ 0x1e98: 0x20f9, 0x1e99: 0x2129, 0x1e9a: 0x2139, 0x1e9b: 0x2149, 0x1e9c: 0x0040, 0x1e9d: 0x0040,
+ 0x1e9e: 0x0040, 0x1e9f: 0x0040, 0x1ea0: 0x0040, 0x1ea1: 0x20b9, 0x1ea2: 0x20d9, 0x1ea3: 0x20f1,
+ 0x1ea4: 0x0040, 0x1ea5: 0x2189, 0x1ea6: 0x2109, 0x1ea7: 0x20e1, 0x1ea8: 0x2131, 0x1ea9: 0x2191,
+ 0x1eaa: 0x0040, 0x1eab: 0x2169, 0x1eac: 0x2171, 0x1ead: 0x2179, 0x1eae: 0x2111, 0x1eaf: 0x2141,
+ 0x1eb0: 0x2151, 0x1eb1: 0x2121, 0x1eb2: 0x2159, 0x1eb3: 0x2101, 0x1eb4: 0x2119, 0x1eb5: 0x20c9,
+ 0x1eb6: 0x20d1, 0x1eb7: 0x20e9, 0x1eb8: 0x20f9, 0x1eb9: 0x2129, 0x1eba: 0x2139, 0x1ebb: 0x2149,
+ 0x1ebc: 0x0040, 0x1ebd: 0x0040, 0x1ebe: 0x0040, 0x1ebf: 0x0040,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0x0040, 0x1ec1: 0x25a2, 0x1ec2: 0x25aa, 0x1ec3: 0x25b2, 0x1ec4: 0x25ba, 0x1ec5: 0x25c2,
+ 0x1ec6: 0x25ca, 0x1ec7: 0x25d2, 0x1ec8: 0x25da, 0x1ec9: 0x25e2, 0x1eca: 0x25ea, 0x1ecb: 0x0018,
+ 0x1ecc: 0x0018, 0x1ecd: 0x0018, 0x1ece: 0x0018, 0x1ecf: 0x0018, 0x1ed0: 0x25f2, 0x1ed1: 0x25fa,
+ 0x1ed2: 0x2602, 0x1ed3: 0x260a, 0x1ed4: 0x2612, 0x1ed5: 0x261a, 0x1ed6: 0x2622, 0x1ed7: 0x262a,
+ 0x1ed8: 0x2632, 0x1ed9: 0x263a, 0x1eda: 0x2642, 0x1edb: 0x264a, 0x1edc: 0x2652, 0x1edd: 0x265a,
+ 0x1ede: 0x2662, 0x1edf: 0x266a, 0x1ee0: 0x2672, 0x1ee1: 0x267a, 0x1ee2: 0x2682, 0x1ee3: 0x268a,
+ 0x1ee4: 0x2692, 0x1ee5: 0x269a, 0x1ee6: 0x26a2, 0x1ee7: 0x26aa, 0x1ee8: 0x26b2, 0x1ee9: 0x26ba,
+ 0x1eea: 0x26c1, 0x1eeb: 0x03d9, 0x1eec: 0x00b9, 0x1eed: 0x1239, 0x1eee: 0x26c9, 0x1eef: 0x0018,
+ 0x1ef0: 0x0019, 0x1ef1: 0x02e9, 0x1ef2: 0x03d9, 0x1ef3: 0x02f1, 0x1ef4: 0x02f9, 0x1ef5: 0x03f1,
+ 0x1ef6: 0x0309, 0x1ef7: 0x00a9, 0x1ef8: 0x0311, 0x1ef9: 0x00b1, 0x1efa: 0x0319, 0x1efb: 0x0101,
+ 0x1efc: 0x0321, 0x1efd: 0x0329, 0x1efe: 0x0051, 0x1eff: 0x0339,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0x0751, 0x1f01: 0x00b9, 0x1f02: 0x0089, 0x1f03: 0x0341, 0x1f04: 0x0349, 0x1f05: 0x0391,
+ 0x1f06: 0x00c1, 0x1f07: 0x0109, 0x1f08: 0x00c9, 0x1f09: 0x04b1, 0x1f0a: 0x26d1, 0x1f0b: 0x11f9,
+ 0x1f0c: 0x26d9, 0x1f0d: 0x04d9, 0x1f0e: 0x26e1, 0x1f0f: 0x26e9, 0x1f10: 0x0018, 0x1f11: 0x0018,
+ 0x1f12: 0x0018, 0x1f13: 0x0018, 0x1f14: 0x0018, 0x1f15: 0x0018, 0x1f16: 0x0018, 0x1f17: 0x0018,
+ 0x1f18: 0x0018, 0x1f19: 0x0018, 0x1f1a: 0x0018, 0x1f1b: 0x0018, 0x1f1c: 0x0018, 0x1f1d: 0x0018,
+ 0x1f1e: 0x0018, 0x1f1f: 0x0018, 0x1f20: 0x0018, 0x1f21: 0x0018, 0x1f22: 0x0018, 0x1f23: 0x0018,
+ 0x1f24: 0x0018, 0x1f25: 0x0018, 0x1f26: 0x0018, 0x1f27: 0x0018, 0x1f28: 0x0018, 0x1f29: 0x0018,
+ 0x1f2a: 0x26f1, 0x1f2b: 0x26f9, 0x1f2c: 0x2701, 0x1f2d: 0x0018, 0x1f2e: 0x0018, 0x1f2f: 0x0018,
+ 0x1f30: 0x0018, 0x1f31: 0x0018, 0x1f32: 0x0018, 0x1f33: 0x0018, 0x1f34: 0x0018, 0x1f35: 0x0018,
+ 0x1f36: 0x0018, 0x1f37: 0x0018, 0x1f38: 0x0018, 0x1f39: 0x0018, 0x1f3a: 0x0018, 0x1f3b: 0x0018,
+ 0x1f3c: 0x0018, 0x1f3d: 0x0018, 0x1f3e: 0x0018, 0x1f3f: 0x0018,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0x2711, 0x1f41: 0x2719, 0x1f42: 0x2721, 0x1f43: 0x0040, 0x1f44: 0x0040, 0x1f45: 0x0040,
+ 0x1f46: 0x0040, 0x1f47: 0x0040, 0x1f48: 0x0040, 0x1f49: 0x0040, 0x1f4a: 0x0040, 0x1f4b: 0x0040,
+ 0x1f4c: 0x0040, 0x1f4d: 0x0040, 0x1f4e: 0x0040, 0x1f4f: 0x0040, 0x1f50: 0x2729, 0x1f51: 0x2731,
+ 0x1f52: 0x2739, 0x1f53: 0x2741, 0x1f54: 0x2749, 0x1f55: 0x2751, 0x1f56: 0x2759, 0x1f57: 0x2761,
+ 0x1f58: 0x2769, 0x1f59: 0x2771, 0x1f5a: 0x2779, 0x1f5b: 0x2781, 0x1f5c: 0x2789, 0x1f5d: 0x2791,
+ 0x1f5e: 0x2799, 0x1f5f: 0x27a1, 0x1f60: 0x27a9, 0x1f61: 0x27b1, 0x1f62: 0x27b9, 0x1f63: 0x27c1,
+ 0x1f64: 0x27c9, 0x1f65: 0x27d1, 0x1f66: 0x27d9, 0x1f67: 0x27e1, 0x1f68: 0x27e9, 0x1f69: 0x27f1,
+ 0x1f6a: 0x27f9, 0x1f6b: 0x2801, 0x1f6c: 0x2809, 0x1f6d: 0x2811, 0x1f6e: 0x2819, 0x1f6f: 0x2821,
+ 0x1f70: 0x2829, 0x1f71: 0x2831, 0x1f72: 0x2839, 0x1f73: 0x2841, 0x1f74: 0x2849, 0x1f75: 0x2851,
+ 0x1f76: 0x2859, 0x1f77: 0x2861, 0x1f78: 0x2869, 0x1f79: 0x2871, 0x1f7a: 0x2879, 0x1f7b: 0x2881,
+ 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0x28e1, 0x1f81: 0x28e9, 0x1f82: 0x28f1, 0x1f83: 0x8cbd, 0x1f84: 0x28f9, 0x1f85: 0x2901,
+ 0x1f86: 0x2909, 0x1f87: 0x2911, 0x1f88: 0x2919, 0x1f89: 0x2921, 0x1f8a: 0x2929, 0x1f8b: 0x2931,
+ 0x1f8c: 0x2939, 0x1f8d: 0x8cdd, 0x1f8e: 0x2941, 0x1f8f: 0x2949, 0x1f90: 0x2951, 0x1f91: 0x2959,
+ 0x1f92: 0x8cfd, 0x1f93: 0x2961, 0x1f94: 0x2969, 0x1f95: 0x2799, 0x1f96: 0x8d1d, 0x1f97: 0x2971,
+ 0x1f98: 0x2979, 0x1f99: 0x2981, 0x1f9a: 0x2989, 0x1f9b: 0x2991, 0x1f9c: 0x8d3d, 0x1f9d: 0x2999,
+ 0x1f9e: 0x29a1, 0x1f9f: 0x29a9, 0x1fa0: 0x29b1, 0x1fa1: 0x29b9, 0x1fa2: 0x2871, 0x1fa3: 0x29c1,
+ 0x1fa4: 0x29c9, 0x1fa5: 0x29d1, 0x1fa6: 0x29d9, 0x1fa7: 0x29e1, 0x1fa8: 0x29e9, 0x1fa9: 0x29f1,
+ 0x1faa: 0x29f9, 0x1fab: 0x2a01, 0x1fac: 0x2a09, 0x1fad: 0x2a11, 0x1fae: 0x2a19, 0x1faf: 0x2a21,
+ 0x1fb0: 0x2a29, 0x1fb1: 0x2a31, 0x1fb2: 0x2a31, 0x1fb3: 0x2a31, 0x1fb4: 0x8d5d, 0x1fb5: 0x2a39,
+ 0x1fb6: 0x2a41, 0x1fb7: 0x2a49, 0x1fb8: 0x8d7d, 0x1fb9: 0x2a51, 0x1fba: 0x2a59, 0x1fbb: 0x2a61,
+ 0x1fbc: 0x2a69, 0x1fbd: 0x2a71, 0x1fbe: 0x2a79, 0x1fbf: 0x2a81,
+ // Block 0x7f, offset 0x1fc0
+ 0x1fc0: 0x2a89, 0x1fc1: 0x2a91, 0x1fc2: 0x2a99, 0x1fc3: 0x2aa1, 0x1fc4: 0x2aa9, 0x1fc5: 0x2ab1,
+ 0x1fc6: 0x2ab1, 0x1fc7: 0x2ab9, 0x1fc8: 0x2ac1, 0x1fc9: 0x2ac9, 0x1fca: 0x2ad1, 0x1fcb: 0x2ad9,
+ 0x1fcc: 0x2ae1, 0x1fcd: 0x2ae9, 0x1fce: 0x2af1, 0x1fcf: 0x2af9, 0x1fd0: 0x2b01, 0x1fd1: 0x2b09,
+ 0x1fd2: 0x2b11, 0x1fd3: 0x2b19, 0x1fd4: 0x2b21, 0x1fd5: 0x2b29, 0x1fd6: 0x2b31, 0x1fd7: 0x2b39,
+ 0x1fd8: 0x2b41, 0x1fd9: 0x8d9d, 0x1fda: 0x2b49, 0x1fdb: 0x2b51, 0x1fdc: 0x2b59, 0x1fdd: 0x2751,
+ 0x1fde: 0x2b61, 0x1fdf: 0x2b69, 0x1fe0: 0x8dbd, 0x1fe1: 0x8ddd, 0x1fe2: 0x2b71, 0x1fe3: 0x2b79,
+ 0x1fe4: 0x2b81, 0x1fe5: 0x2b89, 0x1fe6: 0x2b91, 0x1fe7: 0x2b99, 0x1fe8: 0x2040, 0x1fe9: 0x2ba1,
+ 0x1fea: 0x2ba9, 0x1feb: 0x2ba9, 0x1fec: 0x8dfd, 0x1fed: 0x2bb1, 0x1fee: 0x2bb9, 0x1fef: 0x2bc1,
+ 0x1ff0: 0x2bc9, 0x1ff1: 0x8e1d, 0x1ff2: 0x2bd1, 0x1ff3: 0x2bd9, 0x1ff4: 0x2040, 0x1ff5: 0x2be1,
+ 0x1ff6: 0x2be9, 0x1ff7: 0x2bf1, 0x1ff8: 0x2bf9, 0x1ff9: 0x2c01, 0x1ffa: 0x2c09, 0x1ffb: 0x8e3d,
+ 0x1ffc: 0x2c11, 0x1ffd: 0x8e5d, 0x1ffe: 0x2c19, 0x1fff: 0x2c21,
+ // Block 0x80, offset 0x2000
+ 0x2000: 0x2c29, 0x2001: 0x2c31, 0x2002: 0x2c39, 0x2003: 0x2c41, 0x2004: 0x2c49, 0x2005: 0x2c51,
+ 0x2006: 0x2c59, 0x2007: 0x2c61, 0x2008: 0x2c69, 0x2009: 0x8e7d, 0x200a: 0x2c71, 0x200b: 0x2c79,
+ 0x200c: 0x2c81, 0x200d: 0x2c89, 0x200e: 0x2c91, 0x200f: 0x8e9d, 0x2010: 0x2c99, 0x2011: 0x8ebd,
+ 0x2012: 0x8edd, 0x2013: 0x2ca1, 0x2014: 0x2ca9, 0x2015: 0x2ca9, 0x2016: 0x2cb1, 0x2017: 0x8efd,
+ 0x2018: 0x8f1d, 0x2019: 0x2cb9, 0x201a: 0x2cc1, 0x201b: 0x2cc9, 0x201c: 0x2cd1, 0x201d: 0x2cd9,
+ 0x201e: 0x2ce1, 0x201f: 0x2ce9, 0x2020: 0x2cf1, 0x2021: 0x2cf9, 0x2022: 0x2d01, 0x2023: 0x2d09,
+ 0x2024: 0x8f3d, 0x2025: 0x2d11, 0x2026: 0x2d19, 0x2027: 0x2d21, 0x2028: 0x2d29, 0x2029: 0x2d21,
+ 0x202a: 0x2d31, 0x202b: 0x2d39, 0x202c: 0x2d41, 0x202d: 0x2d49, 0x202e: 0x2d51, 0x202f: 0x2d59,
+ 0x2030: 0x2d61, 0x2031: 0x2d69, 0x2032: 0x2d71, 0x2033: 0x2d79, 0x2034: 0x2d81, 0x2035: 0x2d89,
+ 0x2036: 0x2d91, 0x2037: 0x2d99, 0x2038: 0x8f5d, 0x2039: 0x2da1, 0x203a: 0x2da9, 0x203b: 0x2db1,
+ 0x203c: 0x2db9, 0x203d: 0x2dc1, 0x203e: 0x8f7d, 0x203f: 0x2dc9,
+ // Block 0x81, offset 0x2040
+ 0x2040: 0x2dd1, 0x2041: 0x2dd9, 0x2042: 0x2de1, 0x2043: 0x2de9, 0x2044: 0x2df1, 0x2045: 0x2df9,
+ 0x2046: 0x2e01, 0x2047: 0x2e09, 0x2048: 0x2e11, 0x2049: 0x2e19, 0x204a: 0x8f9d, 0x204b: 0x2e21,
+ 0x204c: 0x2e29, 0x204d: 0x2e31, 0x204e: 0x2e39, 0x204f: 0x2e41, 0x2050: 0x2e49, 0x2051: 0x2e51,
+ 0x2052: 0x2e59, 0x2053: 0x2e61, 0x2054: 0x2e69, 0x2055: 0x2e71, 0x2056: 0x2e79, 0x2057: 0x2e81,
+ 0x2058: 0x2e89, 0x2059: 0x2e91, 0x205a: 0x2e99, 0x205b: 0x2ea1, 0x205c: 0x2ea9, 0x205d: 0x8fbd,
+ 0x205e: 0x2eb1, 0x205f: 0x2eb9, 0x2060: 0x2ec1, 0x2061: 0x2ec9, 0x2062: 0x2ed1, 0x2063: 0x8fdd,
+ 0x2064: 0x2ed9, 0x2065: 0x2ee1, 0x2066: 0x2ee9, 0x2067: 0x2ef1, 0x2068: 0x2ef9, 0x2069: 0x2f01,
+ 0x206a: 0x2f09, 0x206b: 0x2f11, 0x206c: 0x7f0d, 0x206d: 0x2f19, 0x206e: 0x2f21, 0x206f: 0x2f29,
+ 0x2070: 0x8ffd, 0x2071: 0x2f31, 0x2072: 0x2f39, 0x2073: 0x2f41, 0x2074: 0x2f49, 0x2075: 0x2f51,
+ 0x2076: 0x2f59, 0x2077: 0x901d, 0x2078: 0x903d, 0x2079: 0x905d, 0x207a: 0x2f61, 0x207b: 0x907d,
+ 0x207c: 0x2f69, 0x207d: 0x2f71, 0x207e: 0x2f79, 0x207f: 0x2f81,
+ // Block 0x82, offset 0x2080
+ 0x2080: 0x2f89, 0x2081: 0x2f91, 0x2082: 0x2f99, 0x2083: 0x2fa1, 0x2084: 0x2fa9, 0x2085: 0x2fb1,
+ 0x2086: 0x909d, 0x2087: 0x2fb9, 0x2088: 0x2fc1, 0x2089: 0x2fc9, 0x208a: 0x2fd1, 0x208b: 0x2fd9,
+ 0x208c: 0x2fe1, 0x208d: 0x90bd, 0x208e: 0x2fe9, 0x208f: 0x2ff1, 0x2090: 0x90dd, 0x2091: 0x90fd,
+ 0x2092: 0x2ff9, 0x2093: 0x3001, 0x2094: 0x3009, 0x2095: 0x3011, 0x2096: 0x3019, 0x2097: 0x3021,
+ 0x2098: 0x3029, 0x2099: 0x3031, 0x209a: 0x3039, 0x209b: 0x911d, 0x209c: 0x3041, 0x209d: 0x913d,
+ 0x209e: 0x3049, 0x209f: 0x2040, 0x20a0: 0x3051, 0x20a1: 0x3059, 0x20a2: 0x3061, 0x20a3: 0x915d,
+ 0x20a4: 0x3069, 0x20a5: 0x3071, 0x20a6: 0x917d, 0x20a7: 0x919d, 0x20a8: 0x3079, 0x20a9: 0x3081,
+ 0x20aa: 0x3089, 0x20ab: 0x3091, 0x20ac: 0x3099, 0x20ad: 0x3099, 0x20ae: 0x30a1, 0x20af: 0x30a9,
+ 0x20b0: 0x30b1, 0x20b1: 0x30b9, 0x20b2: 0x30c1, 0x20b3: 0x30c9, 0x20b4: 0x30d1, 0x20b5: 0x91bd,
+ 0x20b6: 0x30d9, 0x20b7: 0x91dd, 0x20b8: 0x30e1, 0x20b9: 0x91fd, 0x20ba: 0x30e9, 0x20bb: 0x921d,
+ 0x20bc: 0x923d, 0x20bd: 0x925d, 0x20be: 0x30f1, 0x20bf: 0x30f9,
+ // Block 0x83, offset 0x20c0
+ 0x20c0: 0x3101, 0x20c1: 0x927d, 0x20c2: 0x929d, 0x20c3: 0x92bd, 0x20c4: 0x92dd, 0x20c5: 0x3109,
+ 0x20c6: 0x3111, 0x20c7: 0x3111, 0x20c8: 0x3119, 0x20c9: 0x3121, 0x20ca: 0x3129, 0x20cb: 0x3131,
+ 0x20cc: 0x3139, 0x20cd: 0x92fd, 0x20ce: 0x3141, 0x20cf: 0x3149, 0x20d0: 0x3151, 0x20d1: 0x3159,
+ 0x20d2: 0x931d, 0x20d3: 0x3161, 0x20d4: 0x933d, 0x20d5: 0x935d, 0x20d6: 0x3169, 0x20d7: 0x3171,
+ 0x20d8: 0x3179, 0x20d9: 0x3181, 0x20da: 0x3189, 0x20db: 0x3191, 0x20dc: 0x937d, 0x20dd: 0x939d,
+ 0x20de: 0x93bd, 0x20df: 0x2040, 0x20e0: 0x3199, 0x20e1: 0x93dd, 0x20e2: 0x31a1, 0x20e3: 0x31a9,
+ 0x20e4: 0x31b1, 0x20e5: 0x93fd, 0x20e6: 0x31b9, 0x20e7: 0x31c1, 0x20e8: 0x31c9, 0x20e9: 0x31d1,
+ 0x20ea: 0x31d9, 0x20eb: 0x941d, 0x20ec: 0x31e1, 0x20ed: 0x31e9, 0x20ee: 0x31f1, 0x20ef: 0x31f9,
+ 0x20f0: 0x3201, 0x20f1: 0x3209, 0x20f2: 0x943d, 0x20f3: 0x945d, 0x20f4: 0x3211, 0x20f5: 0x947d,
+ 0x20f6: 0x3219, 0x20f7: 0x949d, 0x20f8: 0x3221, 0x20f9: 0x3229, 0x20fa: 0x3231, 0x20fb: 0x94bd,
+ 0x20fc: 0x94dd, 0x20fd: 0x3239, 0x20fe: 0x94fd, 0x20ff: 0x3241,
+ // Block 0x84, offset 0x2100
+ 0x2100: 0x951d, 0x2101: 0x3249, 0x2102: 0x3251, 0x2103: 0x3259, 0x2104: 0x3261, 0x2105: 0x3269,
+ 0x2106: 0x3271, 0x2107: 0x953d, 0x2108: 0x955d, 0x2109: 0x957d, 0x210a: 0x959d, 0x210b: 0x2ca1,
+ 0x210c: 0x3279, 0x210d: 0x3281, 0x210e: 0x3289, 0x210f: 0x3291, 0x2110: 0x3299, 0x2111: 0x32a1,
+ 0x2112: 0x32a9, 0x2113: 0x32b1, 0x2114: 0x32b9, 0x2115: 0x32c1, 0x2116: 0x32c9, 0x2117: 0x95bd,
+ 0x2118: 0x32d1, 0x2119: 0x32d9, 0x211a: 0x32e1, 0x211b: 0x32e9, 0x211c: 0x32f1, 0x211d: 0x32f9,
+ 0x211e: 0x3301, 0x211f: 0x3309, 0x2120: 0x3311, 0x2121: 0x3319, 0x2122: 0x3321, 0x2123: 0x3329,
+ 0x2124: 0x95dd, 0x2125: 0x95fd, 0x2126: 0x961d, 0x2127: 0x3331, 0x2128: 0x3339, 0x2129: 0x3341,
+ 0x212a: 0x3349, 0x212b: 0x963d, 0x212c: 0x3351, 0x212d: 0x965d, 0x212e: 0x3359, 0x212f: 0x3361,
+ 0x2130: 0x967d, 0x2131: 0x969d, 0x2132: 0x3369, 0x2133: 0x3371, 0x2134: 0x3379, 0x2135: 0x3381,
+ 0x2136: 0x3389, 0x2137: 0x3391, 0x2138: 0x3399, 0x2139: 0x33a1, 0x213a: 0x33a9, 0x213b: 0x33b1,
+ 0x213c: 0x33b9, 0x213d: 0x33c1, 0x213e: 0x33c9, 0x213f: 0x2040,
+ // Block 0x85, offset 0x2140
+ 0x2140: 0x33d1, 0x2141: 0x33d9, 0x2142: 0x33e1, 0x2143: 0x33e9, 0x2144: 0x33f1, 0x2145: 0x96bd,
+ 0x2146: 0x33f9, 0x2147: 0x3401, 0x2148: 0x3409, 0x2149: 0x3411, 0x214a: 0x3419, 0x214b: 0x96dd,
+ 0x214c: 0x96fd, 0x214d: 0x3421, 0x214e: 0x3429, 0x214f: 0x3431, 0x2150: 0x3439, 0x2151: 0x3441,
+ 0x2152: 0x3449, 0x2153: 0x971d, 0x2154: 0x3451, 0x2155: 0x3459, 0x2156: 0x3461, 0x2157: 0x3469,
+ 0x2158: 0x973d, 0x2159: 0x975d, 0x215a: 0x3471, 0x215b: 0x3479, 0x215c: 0x3481, 0x215d: 0x977d,
+ 0x215e: 0x3489, 0x215f: 0x3491, 0x2160: 0x684d, 0x2161: 0x979d, 0x2162: 0x3499, 0x2163: 0x34a1,
+ 0x2164: 0x34a9, 0x2165: 0x97bd, 0x2166: 0x34b1, 0x2167: 0x34b9, 0x2168: 0x34c1, 0x2169: 0x34c9,
+ 0x216a: 0x34d1, 0x216b: 0x34d9, 0x216c: 0x34e1, 0x216d: 0x97dd, 0x216e: 0x34e9, 0x216f: 0x34f1,
+ 0x2170: 0x34f9, 0x2171: 0x97fd, 0x2172: 0x3501, 0x2173: 0x3509, 0x2174: 0x3511, 0x2175: 0x3519,
+ 0x2176: 0x7b6d, 0x2177: 0x981d, 0x2178: 0x3521, 0x2179: 0x3529, 0x217a: 0x3531, 0x217b: 0x983d,
+ 0x217c: 0x3539, 0x217d: 0x985d, 0x217e: 0x3541, 0x217f: 0x3541,
+ // Block 0x86, offset 0x2180
+ 0x2180: 0x3549, 0x2181: 0x987d, 0x2182: 0x3551, 0x2183: 0x3559, 0x2184: 0x3561, 0x2185: 0x3569,
+ 0x2186: 0x3571, 0x2187: 0x3579, 0x2188: 0x3581, 0x2189: 0x989d, 0x218a: 0x3589, 0x218b: 0x3591,
+ 0x218c: 0x3599, 0x218d: 0x35a1, 0x218e: 0x35a9, 0x218f: 0x35b1, 0x2190: 0x98bd, 0x2191: 0x35b9,
+ 0x2192: 0x98dd, 0x2193: 0x98fd, 0x2194: 0x991d, 0x2195: 0x35c1, 0x2196: 0x35c9, 0x2197: 0x35d1,
+ 0x2198: 0x35d9, 0x2199: 0x35e1, 0x219a: 0x35e9, 0x219b: 0x35f1, 0x219c: 0x35f9, 0x219d: 0x993d,
+ 0x219e: 0x0040, 0x219f: 0x0040, 0x21a0: 0x0040, 0x21a1: 0x0040, 0x21a2: 0x0040, 0x21a3: 0x0040,
+ 0x21a4: 0x0040, 0x21a5: 0x0040, 0x21a6: 0x0040, 0x21a7: 0x0040, 0x21a8: 0x0040, 0x21a9: 0x0040,
+ 0x21aa: 0x0040, 0x21ab: 0x0040, 0x21ac: 0x0040, 0x21ad: 0x0040, 0x21ae: 0x0040, 0x21af: 0x0040,
+ 0x21b0: 0x0040, 0x21b1: 0x0040, 0x21b2: 0x0040, 0x21b3: 0x0040, 0x21b4: 0x0040, 0x21b5: 0x0040,
+ 0x21b6: 0x0040, 0x21b7: 0x0040, 0x21b8: 0x0040, 0x21b9: 0x0040, 0x21ba: 0x0040, 0x21bb: 0x0040,
+ 0x21bc: 0x0040, 0x21bd: 0x0040, 0x21be: 0x0040, 0x21bf: 0x0040,
+}
+
+// idnaIndex: 39 blocks, 2496 entries, 4992 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2496]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x85, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x86, 0xca: 0x87, 0xcb: 0x07, 0xcc: 0x88, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x89, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x8a, 0xd6: 0x8b, 0xd7: 0x8c,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x8d, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x8e, 0xde: 0x8f, 0xdf: 0x90,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x07, 0xea: 0x08, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x09, 0xee: 0x0a, 0xef: 0x0b,
+ 0xf0: 0x20, 0xf1: 0x21, 0xf2: 0x21, 0xf3: 0x23, 0xf4: 0x24,
+ // Block 0x4, offset 0x100
+ 0x120: 0x91, 0x121: 0x13, 0x122: 0x14, 0x123: 0x92, 0x124: 0x93, 0x125: 0x15, 0x126: 0x16, 0x127: 0x17,
+ 0x128: 0x18, 0x129: 0x19, 0x12a: 0x1a, 0x12b: 0x1b, 0x12c: 0x1c, 0x12d: 0x1d, 0x12e: 0x1e, 0x12f: 0x94,
+ 0x130: 0x95, 0x131: 0x1f, 0x132: 0x20, 0x133: 0x21, 0x134: 0x96, 0x135: 0x22, 0x136: 0x97, 0x137: 0x98,
+ 0x138: 0x99, 0x139: 0x9a, 0x13a: 0x23, 0x13b: 0x9b, 0x13c: 0x9c, 0x13d: 0x24, 0x13e: 0x25, 0x13f: 0x9d,
+ // Block 0x5, offset 0x140
+ 0x140: 0x9e, 0x141: 0x9f, 0x142: 0xa0, 0x143: 0xa1, 0x144: 0xa2, 0x145: 0xa3, 0x146: 0xa4, 0x147: 0xa5,
+ 0x148: 0xa6, 0x149: 0xa7, 0x14a: 0xa8, 0x14b: 0xa9, 0x14c: 0xaa, 0x14d: 0xab, 0x14e: 0xac, 0x14f: 0xad,
+ 0x150: 0xae, 0x151: 0xa6, 0x152: 0xa6, 0x153: 0xa6, 0x154: 0xa6, 0x155: 0xa6, 0x156: 0xa6, 0x157: 0xa6,
+ 0x158: 0xa6, 0x159: 0xaf, 0x15a: 0xb0, 0x15b: 0xb1, 0x15c: 0xb2, 0x15d: 0xb3, 0x15e: 0xb4, 0x15f: 0xb5,
+ 0x160: 0xb6, 0x161: 0xb7, 0x162: 0xb8, 0x163: 0xb9, 0x164: 0xba, 0x165: 0xbb, 0x166: 0xbc, 0x167: 0xbd,
+ 0x168: 0xbe, 0x169: 0xbf, 0x16a: 0xc0, 0x16b: 0xc1, 0x16c: 0xc2, 0x16d: 0xc3, 0x16e: 0xc4, 0x16f: 0xc5,
+ 0x170: 0xc6, 0x171: 0xc7, 0x172: 0xc8, 0x173: 0xc9, 0x174: 0x26, 0x175: 0x27, 0x176: 0x28, 0x177: 0x88,
+ 0x178: 0x29, 0x179: 0x29, 0x17a: 0x2a, 0x17b: 0x29, 0x17c: 0xca, 0x17d: 0x2b, 0x17e: 0x2c, 0x17f: 0x2d,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2e, 0x181: 0x2f, 0x182: 0x30, 0x183: 0xcb, 0x184: 0x31, 0x185: 0x32, 0x186: 0xcc, 0x187: 0xa2,
+ 0x188: 0xcd, 0x189: 0xce, 0x18a: 0xa2, 0x18b: 0xa2, 0x18c: 0xcf, 0x18d: 0xa2, 0x18e: 0xa2, 0x18f: 0xa2,
+ 0x190: 0xd0, 0x191: 0x33, 0x192: 0x34, 0x193: 0x35, 0x194: 0xa2, 0x195: 0xa2, 0x196: 0xa2, 0x197: 0xa2,
+ 0x198: 0xa2, 0x199: 0xa2, 0x19a: 0xa2, 0x19b: 0xa2, 0x19c: 0xa2, 0x19d: 0xa2, 0x19e: 0xa2, 0x19f: 0xa2,
+ 0x1a0: 0xa2, 0x1a1: 0xa2, 0x1a2: 0xa2, 0x1a3: 0xa2, 0x1a4: 0xa2, 0x1a5: 0xa2, 0x1a6: 0xa2, 0x1a7: 0xa2,
+ 0x1a8: 0xd1, 0x1a9: 0xd2, 0x1aa: 0xa2, 0x1ab: 0xd3, 0x1ac: 0xa2, 0x1ad: 0xd4, 0x1ae: 0xd5, 0x1af: 0xa2,
+ 0x1b0: 0xd6, 0x1b1: 0x36, 0x1b2: 0x29, 0x1b3: 0x37, 0x1b4: 0xd7, 0x1b5: 0xd8, 0x1b6: 0xd9, 0x1b7: 0xda,
+ 0x1b8: 0xdb, 0x1b9: 0xdc, 0x1ba: 0xdd, 0x1bb: 0xde, 0x1bc: 0xdf, 0x1bd: 0xe0, 0x1be: 0xe1, 0x1bf: 0x38,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x39, 0x1c1: 0xe2, 0x1c2: 0xe3, 0x1c3: 0xe4, 0x1c4: 0xe5, 0x1c5: 0x3a, 0x1c6: 0x3b, 0x1c7: 0xe6,
+ 0x1c8: 0xe7, 0x1c9: 0x3c, 0x1ca: 0x3d, 0x1cb: 0x3e, 0x1cc: 0xe8, 0x1cd: 0xe9, 0x1ce: 0x3f, 0x1cf: 0x40,
+ 0x1d0: 0xa6, 0x1d1: 0xa6, 0x1d2: 0xa6, 0x1d3: 0xa6, 0x1d4: 0xa6, 0x1d5: 0xa6, 0x1d6: 0xa6, 0x1d7: 0xa6,
+ 0x1d8: 0xa6, 0x1d9: 0xa6, 0x1da: 0xa6, 0x1db: 0xa6, 0x1dc: 0xa6, 0x1dd: 0xa6, 0x1de: 0xa6, 0x1df: 0xa6,
+ 0x1e0: 0xa6, 0x1e1: 0xa6, 0x1e2: 0xa6, 0x1e3: 0xa6, 0x1e4: 0xa6, 0x1e5: 0xa6, 0x1e6: 0xa6, 0x1e7: 0xa6,
+ 0x1e8: 0xa6, 0x1e9: 0xa6, 0x1ea: 0xa6, 0x1eb: 0xa6, 0x1ec: 0xa6, 0x1ed: 0xa6, 0x1ee: 0xa6, 0x1ef: 0xa6,
+ 0x1f0: 0xa6, 0x1f1: 0xa6, 0x1f2: 0xa6, 0x1f3: 0xa6, 0x1f4: 0xa6, 0x1f5: 0xa6, 0x1f6: 0xa6, 0x1f7: 0xa6,
+ 0x1f8: 0xa6, 0x1f9: 0xa6, 0x1fa: 0xa6, 0x1fb: 0xa6, 0x1fc: 0xa6, 0x1fd: 0xa6, 0x1fe: 0xa6, 0x1ff: 0xa6,
+ // Block 0x8, offset 0x200
+ 0x200: 0xa6, 0x201: 0xa6, 0x202: 0xa6, 0x203: 0xa6, 0x204: 0xa6, 0x205: 0xa6, 0x206: 0xa6, 0x207: 0xa6,
+ 0x208: 0xa6, 0x209: 0xa6, 0x20a: 0xa6, 0x20b: 0xa6, 0x20c: 0xa6, 0x20d: 0xa6, 0x20e: 0xa6, 0x20f: 0xa6,
+ 0x210: 0xa6, 0x211: 0xa6, 0x212: 0xa6, 0x213: 0xa6, 0x214: 0xa6, 0x215: 0xa6, 0x216: 0xa6, 0x217: 0xa6,
+ 0x218: 0xa6, 0x219: 0xa6, 0x21a: 0xa6, 0x21b: 0xa6, 0x21c: 0xa6, 0x21d: 0xa6, 0x21e: 0xa6, 0x21f: 0xa6,
+ 0x220: 0xa6, 0x221: 0xa6, 0x222: 0xa6, 0x223: 0xa6, 0x224: 0xa6, 0x225: 0xa6, 0x226: 0xa6, 0x227: 0xa6,
+ 0x228: 0xa6, 0x229: 0xa6, 0x22a: 0xa6, 0x22b: 0xa6, 0x22c: 0xa6, 0x22d: 0xa6, 0x22e: 0xa6, 0x22f: 0xa6,
+ 0x230: 0xa6, 0x231: 0xa6, 0x232: 0xa6, 0x233: 0xa6, 0x234: 0xa6, 0x235: 0xa6, 0x236: 0xa6, 0x237: 0xa2,
+ 0x238: 0xa6, 0x239: 0xa6, 0x23a: 0xa6, 0x23b: 0xa6, 0x23c: 0xa6, 0x23d: 0xa6, 0x23e: 0xa6, 0x23f: 0xa6,
+ // Block 0x9, offset 0x240
+ 0x240: 0xa6, 0x241: 0xa6, 0x242: 0xa6, 0x243: 0xa6, 0x244: 0xa6, 0x245: 0xa6, 0x246: 0xa6, 0x247: 0xa6,
+ 0x248: 0xa6, 0x249: 0xa6, 0x24a: 0xa6, 0x24b: 0xa6, 0x24c: 0xa6, 0x24d: 0xa6, 0x24e: 0xa6, 0x24f: 0xa6,
+ 0x250: 0xa6, 0x251: 0xa6, 0x252: 0xa6, 0x253: 0xa6, 0x254: 0xa6, 0x255: 0xa6, 0x256: 0xa6, 0x257: 0xa6,
+ 0x258: 0xa6, 0x259: 0xa6, 0x25a: 0xa6, 0x25b: 0xa6, 0x25c: 0xa6, 0x25d: 0xa6, 0x25e: 0xa6, 0x25f: 0xa6,
+ 0x260: 0xa6, 0x261: 0xa6, 0x262: 0xa6, 0x263: 0xa6, 0x264: 0xa6, 0x265: 0xa6, 0x266: 0xa6, 0x267: 0xa6,
+ 0x268: 0xa6, 0x269: 0xa6, 0x26a: 0xa6, 0x26b: 0xa6, 0x26c: 0xa6, 0x26d: 0xa6, 0x26e: 0xa6, 0x26f: 0xa6,
+ 0x270: 0xa6, 0x271: 0xa6, 0x272: 0xa6, 0x273: 0xa6, 0x274: 0xa6, 0x275: 0xa6, 0x276: 0xa6, 0x277: 0xa6,
+ 0x278: 0xa6, 0x279: 0xa6, 0x27a: 0xa6, 0x27b: 0xa6, 0x27c: 0xa6, 0x27d: 0xa6, 0x27e: 0xa6, 0x27f: 0xa6,
+ // Block 0xa, offset 0x280
+ 0x280: 0xa6, 0x281: 0xa6, 0x282: 0xa6, 0x283: 0xa6, 0x284: 0xa6, 0x285: 0xa6, 0x286: 0xa6, 0x287: 0xa6,
+ 0x288: 0xa6, 0x289: 0xa6, 0x28a: 0xa6, 0x28b: 0xa6, 0x28c: 0xa6, 0x28d: 0xa6, 0x28e: 0xa6, 0x28f: 0xa6,
+ 0x290: 0xa6, 0x291: 0xa6, 0x292: 0xea, 0x293: 0xeb, 0x294: 0xa6, 0x295: 0xa6, 0x296: 0xa6, 0x297: 0xa6,
+ 0x298: 0xec, 0x299: 0x41, 0x29a: 0x42, 0x29b: 0xed, 0x29c: 0x43, 0x29d: 0x44, 0x29e: 0x45, 0x29f: 0x46,
+ 0x2a0: 0xee, 0x2a1: 0xef, 0x2a2: 0xf0, 0x2a3: 0xf1, 0x2a4: 0xf2, 0x2a5: 0xf3, 0x2a6: 0xf4, 0x2a7: 0xf5,
+ 0x2a8: 0xf6, 0x2a9: 0xf7, 0x2aa: 0xf8, 0x2ab: 0xf9, 0x2ac: 0xfa, 0x2ad: 0xfb, 0x2ae: 0xfc, 0x2af: 0xfd,
+ 0x2b0: 0xa6, 0x2b1: 0xa6, 0x2b2: 0xa6, 0x2b3: 0xa6, 0x2b4: 0xa6, 0x2b5: 0xa6, 0x2b6: 0xa6, 0x2b7: 0xa6,
+ 0x2b8: 0xa6, 0x2b9: 0xa6, 0x2ba: 0xa6, 0x2bb: 0xa6, 0x2bc: 0xa6, 0x2bd: 0xa6, 0x2be: 0xa6, 0x2bf: 0xa6,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0xa6, 0x2c1: 0xa6, 0x2c2: 0xa6, 0x2c3: 0xa6, 0x2c4: 0xa6, 0x2c5: 0xa6, 0x2c6: 0xa6, 0x2c7: 0xa6,
+ 0x2c8: 0xa6, 0x2c9: 0xa6, 0x2ca: 0xa6, 0x2cb: 0xa6, 0x2cc: 0xa6, 0x2cd: 0xa6, 0x2ce: 0xa6, 0x2cf: 0xa6,
+ 0x2d0: 0xa6, 0x2d1: 0xa6, 0x2d2: 0xa6, 0x2d3: 0xa6, 0x2d4: 0xa6, 0x2d5: 0xa6, 0x2d6: 0xa6, 0x2d7: 0xa6,
+ 0x2d8: 0xa6, 0x2d9: 0xa6, 0x2da: 0xa6, 0x2db: 0xa6, 0x2dc: 0xa6, 0x2dd: 0xa6, 0x2de: 0xfe, 0x2df: 0xff,
+ // Block 0xc, offset 0x300
+ 0x300: 0x100, 0x301: 0x100, 0x302: 0x100, 0x303: 0x100, 0x304: 0x100, 0x305: 0x100, 0x306: 0x100, 0x307: 0x100,
+ 0x308: 0x100, 0x309: 0x100, 0x30a: 0x100, 0x30b: 0x100, 0x30c: 0x100, 0x30d: 0x100, 0x30e: 0x100, 0x30f: 0x100,
+ 0x310: 0x100, 0x311: 0x100, 0x312: 0x100, 0x313: 0x100, 0x314: 0x100, 0x315: 0x100, 0x316: 0x100, 0x317: 0x100,
+ 0x318: 0x100, 0x319: 0x100, 0x31a: 0x100, 0x31b: 0x100, 0x31c: 0x100, 0x31d: 0x100, 0x31e: 0x100, 0x31f: 0x100,
+ 0x320: 0x100, 0x321: 0x100, 0x322: 0x100, 0x323: 0x100, 0x324: 0x100, 0x325: 0x100, 0x326: 0x100, 0x327: 0x100,
+ 0x328: 0x100, 0x329: 0x100, 0x32a: 0x100, 0x32b: 0x100, 0x32c: 0x100, 0x32d: 0x100, 0x32e: 0x100, 0x32f: 0x100,
+ 0x330: 0x100, 0x331: 0x100, 0x332: 0x100, 0x333: 0x100, 0x334: 0x100, 0x335: 0x100, 0x336: 0x100, 0x337: 0x100,
+ 0x338: 0x100, 0x339: 0x100, 0x33a: 0x100, 0x33b: 0x100, 0x33c: 0x100, 0x33d: 0x100, 0x33e: 0x100, 0x33f: 0x100,
+ // Block 0xd, offset 0x340
+ 0x340: 0x100, 0x341: 0x100, 0x342: 0x100, 0x343: 0x100, 0x344: 0x100, 0x345: 0x100, 0x346: 0x100, 0x347: 0x100,
+ 0x348: 0x100, 0x349: 0x100, 0x34a: 0x100, 0x34b: 0x100, 0x34c: 0x100, 0x34d: 0x100, 0x34e: 0x100, 0x34f: 0x100,
+ 0x350: 0x100, 0x351: 0x100, 0x352: 0x100, 0x353: 0x100, 0x354: 0x100, 0x355: 0x100, 0x356: 0x100, 0x357: 0x100,
+ 0x358: 0x100, 0x359: 0x100, 0x35a: 0x100, 0x35b: 0x100, 0x35c: 0x100, 0x35d: 0x100, 0x35e: 0x100, 0x35f: 0x100,
+ 0x360: 0x100, 0x361: 0x100, 0x362: 0x100, 0x363: 0x100, 0x364: 0x101, 0x365: 0x102, 0x366: 0x103, 0x367: 0x104,
+ 0x368: 0x47, 0x369: 0x105, 0x36a: 0x106, 0x36b: 0x48, 0x36c: 0x49, 0x36d: 0x4a, 0x36e: 0x4b, 0x36f: 0x4c,
+ 0x370: 0x107, 0x371: 0x4d, 0x372: 0x4e, 0x373: 0x4f, 0x374: 0x50, 0x375: 0x51, 0x376: 0x108, 0x377: 0x52,
+ 0x378: 0x53, 0x379: 0x54, 0x37a: 0x55, 0x37b: 0x56, 0x37c: 0x57, 0x37d: 0x58, 0x37e: 0x59, 0x37f: 0x5a,
+ // Block 0xe, offset 0x380
+ 0x380: 0x109, 0x381: 0x10a, 0x382: 0xa6, 0x383: 0x10b, 0x384: 0x10c, 0x385: 0xa2, 0x386: 0x10d, 0x387: 0x10e,
+ 0x388: 0x100, 0x389: 0x100, 0x38a: 0x10f, 0x38b: 0x110, 0x38c: 0x111, 0x38d: 0x112, 0x38e: 0x113, 0x38f: 0x114,
+ 0x390: 0x115, 0x391: 0xa6, 0x392: 0x116, 0x393: 0x117, 0x394: 0x118, 0x395: 0x5b, 0x396: 0x5c, 0x397: 0x100,
+ 0x398: 0xa6, 0x399: 0xa6, 0x39a: 0xa6, 0x39b: 0xa6, 0x39c: 0x119, 0x39d: 0x11a, 0x39e: 0x5d, 0x39f: 0x100,
+ 0x3a0: 0x11b, 0x3a1: 0x11c, 0x3a2: 0x11d, 0x3a3: 0x11e, 0x3a4: 0x11f, 0x3a5: 0x100, 0x3a6: 0x120, 0x3a7: 0x121,
+ 0x3a8: 0x122, 0x3a9: 0x123, 0x3aa: 0x124, 0x3ab: 0x5e, 0x3ac: 0x125, 0x3ad: 0x126, 0x3ae: 0x5f, 0x3af: 0x100,
+ 0x3b0: 0x127, 0x3b1: 0x128, 0x3b2: 0x129, 0x3b3: 0x12a, 0x3b4: 0x12b, 0x3b5: 0x100, 0x3b6: 0x100, 0x3b7: 0x100,
+ 0x3b8: 0x100, 0x3b9: 0x12c, 0x3ba: 0x12d, 0x3bb: 0x12e, 0x3bc: 0x12f, 0x3bd: 0x130, 0x3be: 0x131, 0x3bf: 0x132,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x133, 0x3c1: 0x134, 0x3c2: 0x135, 0x3c3: 0x136, 0x3c4: 0x137, 0x3c5: 0x138, 0x3c6: 0x139, 0x3c7: 0x13a,
+ 0x3c8: 0x13b, 0x3c9: 0x13c, 0x3ca: 0x13d, 0x3cb: 0x13e, 0x3cc: 0x60, 0x3cd: 0x61, 0x3ce: 0x100, 0x3cf: 0x100,
+ 0x3d0: 0x13f, 0x3d1: 0x140, 0x3d2: 0x141, 0x3d3: 0x142, 0x3d4: 0x100, 0x3d5: 0x100, 0x3d6: 0x143, 0x3d7: 0x144,
+ 0x3d8: 0x145, 0x3d9: 0x146, 0x3da: 0x147, 0x3db: 0x148, 0x3dc: 0x149, 0x3dd: 0x14a, 0x3de: 0x100, 0x3df: 0x100,
+ 0x3e0: 0x14b, 0x3e1: 0x100, 0x3e2: 0x14c, 0x3e3: 0x14d, 0x3e4: 0x62, 0x3e5: 0x14e, 0x3e6: 0x14f, 0x3e7: 0x150,
+ 0x3e8: 0x151, 0x3e9: 0x152, 0x3ea: 0x153, 0x3eb: 0x154, 0x3ec: 0x155, 0x3ed: 0x100, 0x3ee: 0x100, 0x3ef: 0x100,
+ 0x3f0: 0x156, 0x3f1: 0x157, 0x3f2: 0x158, 0x3f3: 0x100, 0x3f4: 0x159, 0x3f5: 0x15a, 0x3f6: 0x15b, 0x3f7: 0x100,
+ 0x3f8: 0x100, 0x3f9: 0x100, 0x3fa: 0x100, 0x3fb: 0x15c, 0x3fc: 0x15d, 0x3fd: 0x15e, 0x3fe: 0x15f, 0x3ff: 0x160,
+ // Block 0x10, offset 0x400
+ 0x400: 0xa6, 0x401: 0xa6, 0x402: 0xa6, 0x403: 0xa6, 0x404: 0xa6, 0x405: 0xa6, 0x406: 0xa6, 0x407: 0xa6,
+ 0x408: 0xa6, 0x409: 0xa6, 0x40a: 0xa6, 0x40b: 0xa6, 0x40c: 0xa6, 0x40d: 0xa6, 0x40e: 0x161, 0x40f: 0x100,
+ 0x410: 0xa2, 0x411: 0x162, 0x412: 0xa6, 0x413: 0xa6, 0x414: 0xa6, 0x415: 0x163, 0x416: 0x100, 0x417: 0x100,
+ 0x418: 0x100, 0x419: 0x100, 0x41a: 0x100, 0x41b: 0x100, 0x41c: 0x100, 0x41d: 0x100, 0x41e: 0x100, 0x41f: 0x100,
+ 0x420: 0x100, 0x421: 0x100, 0x422: 0x100, 0x423: 0x100, 0x424: 0x100, 0x425: 0x100, 0x426: 0x100, 0x427: 0x100,
+ 0x428: 0x100, 0x429: 0x100, 0x42a: 0x100, 0x42b: 0x100, 0x42c: 0x100, 0x42d: 0x100, 0x42e: 0x100, 0x42f: 0x100,
+ 0x430: 0x100, 0x431: 0x100, 0x432: 0x100, 0x433: 0x100, 0x434: 0x100, 0x435: 0x100, 0x436: 0x100, 0x437: 0x100,
+ 0x438: 0x100, 0x439: 0x100, 0x43a: 0x100, 0x43b: 0x100, 0x43c: 0x100, 0x43d: 0x100, 0x43e: 0x164, 0x43f: 0x165,
+ // Block 0x11, offset 0x440
+ 0x440: 0xa6, 0x441: 0xa6, 0x442: 0xa6, 0x443: 0xa6, 0x444: 0xa6, 0x445: 0xa6, 0x446: 0xa6, 0x447: 0xa6,
+ 0x448: 0xa6, 0x449: 0xa6, 0x44a: 0xa6, 0x44b: 0xa6, 0x44c: 0xa6, 0x44d: 0xa6, 0x44e: 0xa6, 0x44f: 0xa6,
+ 0x450: 0x166, 0x451: 0x167, 0x452: 0x100, 0x453: 0x100, 0x454: 0x100, 0x455: 0x100, 0x456: 0x100, 0x457: 0x100,
+ 0x458: 0x100, 0x459: 0x100, 0x45a: 0x100, 0x45b: 0x100, 0x45c: 0x100, 0x45d: 0x100, 0x45e: 0x100, 0x45f: 0x100,
+ 0x460: 0x100, 0x461: 0x100, 0x462: 0x100, 0x463: 0x100, 0x464: 0x100, 0x465: 0x100, 0x466: 0x100, 0x467: 0x100,
+ 0x468: 0x100, 0x469: 0x100, 0x46a: 0x100, 0x46b: 0x100, 0x46c: 0x100, 0x46d: 0x100, 0x46e: 0x100, 0x46f: 0x100,
+ 0x470: 0x100, 0x471: 0x100, 0x472: 0x100, 0x473: 0x100, 0x474: 0x100, 0x475: 0x100, 0x476: 0x100, 0x477: 0x100,
+ 0x478: 0x100, 0x479: 0x100, 0x47a: 0x100, 0x47b: 0x100, 0x47c: 0x100, 0x47d: 0x100, 0x47e: 0x100, 0x47f: 0x100,
+ // Block 0x12, offset 0x480
+ 0x480: 0x100, 0x481: 0x100, 0x482: 0x100, 0x483: 0x100, 0x484: 0x100, 0x485: 0x100, 0x486: 0x100, 0x487: 0x100,
+ 0x488: 0x100, 0x489: 0x100, 0x48a: 0x100, 0x48b: 0x100, 0x48c: 0x100, 0x48d: 0x100, 0x48e: 0x100, 0x48f: 0x100,
+ 0x490: 0xa6, 0x491: 0xa6, 0x492: 0xa6, 0x493: 0xa6, 0x494: 0xa6, 0x495: 0xa6, 0x496: 0xa6, 0x497: 0xa6,
+ 0x498: 0xa6, 0x499: 0x14a, 0x49a: 0x100, 0x49b: 0x100, 0x49c: 0x100, 0x49d: 0x100, 0x49e: 0x100, 0x49f: 0x100,
+ 0x4a0: 0x100, 0x4a1: 0x100, 0x4a2: 0x100, 0x4a3: 0x100, 0x4a4: 0x100, 0x4a5: 0x100, 0x4a6: 0x100, 0x4a7: 0x100,
+ 0x4a8: 0x100, 0x4a9: 0x100, 0x4aa: 0x100, 0x4ab: 0x100, 0x4ac: 0x100, 0x4ad: 0x100, 0x4ae: 0x100, 0x4af: 0x100,
+ 0x4b0: 0x100, 0x4b1: 0x100, 0x4b2: 0x100, 0x4b3: 0x100, 0x4b4: 0x100, 0x4b5: 0x100, 0x4b6: 0x100, 0x4b7: 0x100,
+ 0x4b8: 0x100, 0x4b9: 0x100, 0x4ba: 0x100, 0x4bb: 0x100, 0x4bc: 0x100, 0x4bd: 0x100, 0x4be: 0x100, 0x4bf: 0x100,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x100, 0x4c1: 0x100, 0x4c2: 0x100, 0x4c3: 0x100, 0x4c4: 0x100, 0x4c5: 0x100, 0x4c6: 0x100, 0x4c7: 0x100,
+ 0x4c8: 0x100, 0x4c9: 0x100, 0x4ca: 0x100, 0x4cb: 0x100, 0x4cc: 0x100, 0x4cd: 0x100, 0x4ce: 0x100, 0x4cf: 0x100,
+ 0x4d0: 0x100, 0x4d1: 0x100, 0x4d2: 0x100, 0x4d3: 0x100, 0x4d4: 0x100, 0x4d5: 0x100, 0x4d6: 0x100, 0x4d7: 0x100,
+ 0x4d8: 0x100, 0x4d9: 0x100, 0x4da: 0x100, 0x4db: 0x100, 0x4dc: 0x100, 0x4dd: 0x100, 0x4de: 0x100, 0x4df: 0x100,
+ 0x4e0: 0xa6, 0x4e1: 0xa6, 0x4e2: 0xa6, 0x4e3: 0xa6, 0x4e4: 0xa6, 0x4e5: 0xa6, 0x4e6: 0xa6, 0x4e7: 0xa6,
+ 0x4e8: 0x154, 0x4e9: 0x168, 0x4ea: 0x169, 0x4eb: 0x16a, 0x4ec: 0x16b, 0x4ed: 0x16c, 0x4ee: 0x16d, 0x4ef: 0x100,
+ 0x4f0: 0x100, 0x4f1: 0x100, 0x4f2: 0x100, 0x4f3: 0x100, 0x4f4: 0x100, 0x4f5: 0x100, 0x4f6: 0x100, 0x4f7: 0x100,
+ 0x4f8: 0x100, 0x4f9: 0x16e, 0x4fa: 0x16f, 0x4fb: 0x100, 0x4fc: 0xa6, 0x4fd: 0x170, 0x4fe: 0x171, 0x4ff: 0x172,
+ // Block 0x14, offset 0x500
+ 0x500: 0xa6, 0x501: 0xa6, 0x502: 0xa6, 0x503: 0xa6, 0x504: 0xa6, 0x505: 0xa6, 0x506: 0xa6, 0x507: 0xa6,
+ 0x508: 0xa6, 0x509: 0xa6, 0x50a: 0xa6, 0x50b: 0xa6, 0x50c: 0xa6, 0x50d: 0xa6, 0x50e: 0xa6, 0x50f: 0xa6,
+ 0x510: 0xa6, 0x511: 0xa6, 0x512: 0xa6, 0x513: 0xa6, 0x514: 0xa6, 0x515: 0xa6, 0x516: 0xa6, 0x517: 0xa6,
+ 0x518: 0xa6, 0x519: 0xa6, 0x51a: 0xa6, 0x51b: 0xa6, 0x51c: 0xa6, 0x51d: 0xa6, 0x51e: 0xa6, 0x51f: 0x173,
+ 0x520: 0xa6, 0x521: 0xa6, 0x522: 0xa6, 0x523: 0xa6, 0x524: 0xa6, 0x525: 0xa6, 0x526: 0xa6, 0x527: 0xa6,
+ 0x528: 0xa6, 0x529: 0xa6, 0x52a: 0xa6, 0x52b: 0xa6, 0x52c: 0xa6, 0x52d: 0xa6, 0x52e: 0xa6, 0x52f: 0xa6,
+ 0x530: 0xa6, 0x531: 0xa6, 0x532: 0xa6, 0x533: 0x174, 0x534: 0x175, 0x535: 0x100, 0x536: 0x100, 0x537: 0x100,
+ 0x538: 0x100, 0x539: 0x100, 0x53a: 0x100, 0x53b: 0x100, 0x53c: 0x100, 0x53d: 0x100, 0x53e: 0x100, 0x53f: 0x100,
+ // Block 0x15, offset 0x540
+ 0x540: 0x100, 0x541: 0x100, 0x542: 0x100, 0x543: 0x100, 0x544: 0x100, 0x545: 0x100, 0x546: 0x100, 0x547: 0x100,
+ 0x548: 0x100, 0x549: 0x100, 0x54a: 0x100, 0x54b: 0x100, 0x54c: 0x100, 0x54d: 0x100, 0x54e: 0x100, 0x54f: 0x100,
+ 0x550: 0x100, 0x551: 0x100, 0x552: 0x100, 0x553: 0x100, 0x554: 0x100, 0x555: 0x100, 0x556: 0x100, 0x557: 0x100,
+ 0x558: 0x100, 0x559: 0x100, 0x55a: 0x100, 0x55b: 0x100, 0x55c: 0x100, 0x55d: 0x100, 0x55e: 0x100, 0x55f: 0x100,
+ 0x560: 0x100, 0x561: 0x100, 0x562: 0x100, 0x563: 0x100, 0x564: 0x100, 0x565: 0x100, 0x566: 0x100, 0x567: 0x100,
+ 0x568: 0x100, 0x569: 0x100, 0x56a: 0x100, 0x56b: 0x100, 0x56c: 0x100, 0x56d: 0x100, 0x56e: 0x100, 0x56f: 0x100,
+ 0x570: 0x100, 0x571: 0x100, 0x572: 0x100, 0x573: 0x100, 0x574: 0x100, 0x575: 0x100, 0x576: 0x100, 0x577: 0x100,
+ 0x578: 0x100, 0x579: 0x100, 0x57a: 0x100, 0x57b: 0x100, 0x57c: 0x100, 0x57d: 0x100, 0x57e: 0x100, 0x57f: 0x176,
+ // Block 0x16, offset 0x580
+ 0x580: 0xa6, 0x581: 0xa6, 0x582: 0xa6, 0x583: 0xa6, 0x584: 0x177, 0x585: 0x178, 0x586: 0xa6, 0x587: 0xa6,
+ 0x588: 0xa6, 0x589: 0xa6, 0x58a: 0xa6, 0x58b: 0x179, 0x58c: 0x100, 0x58d: 0x100, 0x58e: 0x100, 0x58f: 0x100,
+ 0x590: 0x100, 0x591: 0x100, 0x592: 0x100, 0x593: 0x100, 0x594: 0x100, 0x595: 0x100, 0x596: 0x100, 0x597: 0x100,
+ 0x598: 0x100, 0x599: 0x100, 0x59a: 0x100, 0x59b: 0x100, 0x59c: 0x100, 0x59d: 0x100, 0x59e: 0x100, 0x59f: 0x100,
+ 0x5a0: 0x100, 0x5a1: 0x100, 0x5a2: 0x100, 0x5a3: 0x100, 0x5a4: 0x100, 0x5a5: 0x100, 0x5a6: 0x100, 0x5a7: 0x100,
+ 0x5a8: 0x100, 0x5a9: 0x100, 0x5aa: 0x100, 0x5ab: 0x100, 0x5ac: 0x100, 0x5ad: 0x100, 0x5ae: 0x100, 0x5af: 0x100,
+ 0x5b0: 0xa6, 0x5b1: 0x17a, 0x5b2: 0x17b, 0x5b3: 0x100, 0x5b4: 0x100, 0x5b5: 0x100, 0x5b6: 0x100, 0x5b7: 0x100,
+ 0x5b8: 0x100, 0x5b9: 0x100, 0x5ba: 0x100, 0x5bb: 0x100, 0x5bc: 0x100, 0x5bd: 0x100, 0x5be: 0x100, 0x5bf: 0x100,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x100, 0x5c1: 0x100, 0x5c2: 0x100, 0x5c3: 0x100, 0x5c4: 0x100, 0x5c5: 0x100, 0x5c6: 0x100, 0x5c7: 0x100,
+ 0x5c8: 0x100, 0x5c9: 0x100, 0x5ca: 0x100, 0x5cb: 0x100, 0x5cc: 0x100, 0x5cd: 0x100, 0x5ce: 0x100, 0x5cf: 0x100,
+ 0x5d0: 0x100, 0x5d1: 0x100, 0x5d2: 0x100, 0x5d3: 0x100, 0x5d4: 0x100, 0x5d5: 0x100, 0x5d6: 0x100, 0x5d7: 0x100,
+ 0x5d8: 0x100, 0x5d9: 0x100, 0x5da: 0x100, 0x5db: 0x100, 0x5dc: 0x100, 0x5dd: 0x100, 0x5de: 0x100, 0x5df: 0x100,
+ 0x5e0: 0x100, 0x5e1: 0x100, 0x5e2: 0x100, 0x5e3: 0x100, 0x5e4: 0x100, 0x5e5: 0x100, 0x5e6: 0x100, 0x5e7: 0x100,
+ 0x5e8: 0x100, 0x5e9: 0x100, 0x5ea: 0x100, 0x5eb: 0x100, 0x5ec: 0x100, 0x5ed: 0x100, 0x5ee: 0x100, 0x5ef: 0x100,
+ 0x5f0: 0x100, 0x5f1: 0x100, 0x5f2: 0x100, 0x5f3: 0x100, 0x5f4: 0x100, 0x5f5: 0x100, 0x5f6: 0x100, 0x5f7: 0x100,
+ 0x5f8: 0x100, 0x5f9: 0x100, 0x5fa: 0x100, 0x5fb: 0x100, 0x5fc: 0x17c, 0x5fd: 0x17d, 0x5fe: 0xa2, 0x5ff: 0x17e,
+ // Block 0x18, offset 0x600
+ 0x600: 0xa2, 0x601: 0xa2, 0x602: 0xa2, 0x603: 0x17f, 0x604: 0x180, 0x605: 0x181, 0x606: 0x182, 0x607: 0x183,
+ 0x608: 0xa2, 0x609: 0x184, 0x60a: 0x100, 0x60b: 0x185, 0x60c: 0xa2, 0x60d: 0x186, 0x60e: 0x100, 0x60f: 0x100,
+ 0x610: 0x63, 0x611: 0x64, 0x612: 0x65, 0x613: 0x66, 0x614: 0x67, 0x615: 0x68, 0x616: 0x69, 0x617: 0x6a,
+ 0x618: 0x6b, 0x619: 0x6c, 0x61a: 0x6d, 0x61b: 0x6e, 0x61c: 0x6f, 0x61d: 0x70, 0x61e: 0x71, 0x61f: 0x72,
+ 0x620: 0xa2, 0x621: 0xa2, 0x622: 0xa2, 0x623: 0xa2, 0x624: 0xa2, 0x625: 0xa2, 0x626: 0xa2, 0x627: 0xa2,
+ 0x628: 0x187, 0x629: 0x188, 0x62a: 0x189, 0x62b: 0x100, 0x62c: 0x100, 0x62d: 0x100, 0x62e: 0x100, 0x62f: 0x100,
+ 0x630: 0x100, 0x631: 0x100, 0x632: 0x100, 0x633: 0x100, 0x634: 0x100, 0x635: 0x100, 0x636: 0x100, 0x637: 0x100,
+ 0x638: 0x100, 0x639: 0x100, 0x63a: 0x100, 0x63b: 0x100, 0x63c: 0x18a, 0x63d: 0x100, 0x63e: 0x100, 0x63f: 0x100,
+ // Block 0x19, offset 0x640
+ 0x640: 0x73, 0x641: 0x74, 0x642: 0x18b, 0x643: 0x100, 0x644: 0x18c, 0x645: 0x18d, 0x646: 0x100, 0x647: 0x100,
+ 0x648: 0x100, 0x649: 0x100, 0x64a: 0x18e, 0x64b: 0x18f, 0x64c: 0x100, 0x64d: 0x100, 0x64e: 0x100, 0x64f: 0x100,
+ 0x650: 0x100, 0x651: 0x100, 0x652: 0x100, 0x653: 0x190, 0x654: 0x100, 0x655: 0x100, 0x656: 0x100, 0x657: 0x100,
+ 0x658: 0x100, 0x659: 0x100, 0x65a: 0x100, 0x65b: 0x100, 0x65c: 0x100, 0x65d: 0x100, 0x65e: 0x100, 0x65f: 0x191,
+ 0x660: 0x127, 0x661: 0x127, 0x662: 0x127, 0x663: 0x192, 0x664: 0x75, 0x665: 0x193, 0x666: 0x100, 0x667: 0x100,
+ 0x668: 0x100, 0x669: 0x100, 0x66a: 0x100, 0x66b: 0x100, 0x66c: 0x100, 0x66d: 0x100, 0x66e: 0x100, 0x66f: 0x100,
+ 0x670: 0x100, 0x671: 0x194, 0x672: 0x195, 0x673: 0x100, 0x674: 0x196, 0x675: 0x100, 0x676: 0x100, 0x677: 0x100,
+ 0x678: 0x76, 0x679: 0x77, 0x67a: 0x78, 0x67b: 0x197, 0x67c: 0x100, 0x67d: 0x100, 0x67e: 0x100, 0x67f: 0x100,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x198, 0x681: 0xa2, 0x682: 0x199, 0x683: 0x19a, 0x684: 0x79, 0x685: 0x7a, 0x686: 0x19b, 0x687: 0x19c,
+ 0x688: 0x7b, 0x689: 0x19d, 0x68a: 0x100, 0x68b: 0x100, 0x68c: 0xa2, 0x68d: 0xa2, 0x68e: 0xa2, 0x68f: 0xa2,
+ 0x690: 0xa2, 0x691: 0xa2, 0x692: 0xa2, 0x693: 0xa2, 0x694: 0xa2, 0x695: 0xa2, 0x696: 0xa2, 0x697: 0xa2,
+ 0x698: 0xa2, 0x699: 0xa2, 0x69a: 0xa2, 0x69b: 0x19e, 0x69c: 0xa2, 0x69d: 0x19f, 0x69e: 0xa2, 0x69f: 0x1a0,
+ 0x6a0: 0x1a1, 0x6a1: 0x1a2, 0x6a2: 0x1a3, 0x6a3: 0x100, 0x6a4: 0xa2, 0x6a5: 0xa2, 0x6a6: 0xa2, 0x6a7: 0xa2,
+ 0x6a8: 0xa2, 0x6a9: 0x1a4, 0x6aa: 0x1a5, 0x6ab: 0x1a6, 0x6ac: 0xa2, 0x6ad: 0xa2, 0x6ae: 0x1a7, 0x6af: 0x1a8,
+ 0x6b0: 0x100, 0x6b1: 0x100, 0x6b2: 0x100, 0x6b3: 0x100, 0x6b4: 0x100, 0x6b5: 0x100, 0x6b6: 0x100, 0x6b7: 0x100,
+ 0x6b8: 0x100, 0x6b9: 0x100, 0x6ba: 0x100, 0x6bb: 0x100, 0x6bc: 0x100, 0x6bd: 0x100, 0x6be: 0x100, 0x6bf: 0x100,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0xa6, 0x6c1: 0xa6, 0x6c2: 0xa6, 0x6c3: 0xa6, 0x6c4: 0xa6, 0x6c5: 0xa6, 0x6c6: 0xa6, 0x6c7: 0xa6,
+ 0x6c8: 0xa6, 0x6c9: 0xa6, 0x6ca: 0xa6, 0x6cb: 0xa6, 0x6cc: 0xa6, 0x6cd: 0xa6, 0x6ce: 0xa6, 0x6cf: 0xa6,
+ 0x6d0: 0xa6, 0x6d1: 0xa6, 0x6d2: 0xa6, 0x6d3: 0xa6, 0x6d4: 0xa6, 0x6d5: 0xa6, 0x6d6: 0xa6, 0x6d7: 0xa6,
+ 0x6d8: 0xa6, 0x6d9: 0xa6, 0x6da: 0xa6, 0x6db: 0x1a9, 0x6dc: 0xa6, 0x6dd: 0xa6, 0x6de: 0xa6, 0x6df: 0xa6,
+ 0x6e0: 0xa6, 0x6e1: 0xa6, 0x6e2: 0xa6, 0x6e3: 0xa6, 0x6e4: 0xa6, 0x6e5: 0xa6, 0x6e6: 0xa6, 0x6e7: 0xa6,
+ 0x6e8: 0xa6, 0x6e9: 0xa6, 0x6ea: 0xa6, 0x6eb: 0xa6, 0x6ec: 0xa6, 0x6ed: 0xa6, 0x6ee: 0xa6, 0x6ef: 0xa6,
+ 0x6f0: 0xa6, 0x6f1: 0xa6, 0x6f2: 0xa6, 0x6f3: 0xa6, 0x6f4: 0xa6, 0x6f5: 0xa6, 0x6f6: 0xa6, 0x6f7: 0xa6,
+ 0x6f8: 0xa6, 0x6f9: 0xa6, 0x6fa: 0xa6, 0x6fb: 0xa6, 0x6fc: 0xa6, 0x6fd: 0xa6, 0x6fe: 0xa6, 0x6ff: 0xa6,
+ // Block 0x1c, offset 0x700
+ 0x700: 0xa6, 0x701: 0xa6, 0x702: 0xa6, 0x703: 0xa6, 0x704: 0xa6, 0x705: 0xa6, 0x706: 0xa6, 0x707: 0xa6,
+ 0x708: 0xa6, 0x709: 0xa6, 0x70a: 0xa6, 0x70b: 0xa6, 0x70c: 0xa6, 0x70d: 0xa6, 0x70e: 0xa6, 0x70f: 0xa6,
+ 0x710: 0xa6, 0x711: 0xa6, 0x712: 0xa6, 0x713: 0xa6, 0x714: 0xa6, 0x715: 0xa6, 0x716: 0xa6, 0x717: 0xa6,
+ 0x718: 0xa6, 0x719: 0xa6, 0x71a: 0xa6, 0x71b: 0xa6, 0x71c: 0x1aa, 0x71d: 0xa6, 0x71e: 0xa6, 0x71f: 0xa6,
+ 0x720: 0x1ab, 0x721: 0xa6, 0x722: 0xa6, 0x723: 0xa6, 0x724: 0xa6, 0x725: 0xa6, 0x726: 0xa6, 0x727: 0xa6,
+ 0x728: 0xa6, 0x729: 0xa6, 0x72a: 0xa6, 0x72b: 0xa6, 0x72c: 0xa6, 0x72d: 0xa6, 0x72e: 0xa6, 0x72f: 0xa6,
+ 0x730: 0xa6, 0x731: 0xa6, 0x732: 0xa6, 0x733: 0xa6, 0x734: 0xa6, 0x735: 0xa6, 0x736: 0xa6, 0x737: 0xa6,
+ 0x738: 0xa6, 0x739: 0xa6, 0x73a: 0xa6, 0x73b: 0xa6, 0x73c: 0xa6, 0x73d: 0xa6, 0x73e: 0xa6, 0x73f: 0xa6,
+ // Block 0x1d, offset 0x740
+ 0x740: 0xa6, 0x741: 0xa6, 0x742: 0xa6, 0x743: 0xa6, 0x744: 0xa6, 0x745: 0xa6, 0x746: 0xa6, 0x747: 0xa6,
+ 0x748: 0xa6, 0x749: 0xa6, 0x74a: 0xa6, 0x74b: 0xa6, 0x74c: 0xa6, 0x74d: 0xa6, 0x74e: 0xa6, 0x74f: 0xa6,
+ 0x750: 0xa6, 0x751: 0xa6, 0x752: 0xa6, 0x753: 0xa6, 0x754: 0xa6, 0x755: 0xa6, 0x756: 0xa6, 0x757: 0xa6,
+ 0x758: 0xa6, 0x759: 0xa6, 0x75a: 0xa6, 0x75b: 0xa6, 0x75c: 0xa6, 0x75d: 0xa6, 0x75e: 0xa6, 0x75f: 0xa6,
+ 0x760: 0xa6, 0x761: 0xa6, 0x762: 0xa6, 0x763: 0xa6, 0x764: 0xa6, 0x765: 0xa6, 0x766: 0xa6, 0x767: 0xa6,
+ 0x768: 0xa6, 0x769: 0xa6, 0x76a: 0xa6, 0x76b: 0xa6, 0x76c: 0xa6, 0x76d: 0xa6, 0x76e: 0xa6, 0x76f: 0xa6,
+ 0x770: 0xa6, 0x771: 0xa6, 0x772: 0xa6, 0x773: 0xa6, 0x774: 0xa6, 0x775: 0xa6, 0x776: 0xa6, 0x777: 0xa6,
+ 0x778: 0xa6, 0x779: 0xa6, 0x77a: 0x1ac, 0x77b: 0xa6, 0x77c: 0xa6, 0x77d: 0xa6, 0x77e: 0xa6, 0x77f: 0xa6,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xa6, 0x781: 0xa6, 0x782: 0xa6, 0x783: 0xa6, 0x784: 0xa6, 0x785: 0xa6, 0x786: 0xa6, 0x787: 0xa6,
+ 0x788: 0xa6, 0x789: 0xa6, 0x78a: 0xa6, 0x78b: 0xa6, 0x78c: 0xa6, 0x78d: 0xa6, 0x78e: 0xa6, 0x78f: 0xa6,
+ 0x790: 0xa6, 0x791: 0xa6, 0x792: 0xa6, 0x793: 0xa6, 0x794: 0xa6, 0x795: 0xa6, 0x796: 0xa6, 0x797: 0xa6,
+ 0x798: 0xa6, 0x799: 0xa6, 0x79a: 0xa6, 0x79b: 0xa6, 0x79c: 0xa6, 0x79d: 0xa6, 0x79e: 0xa6, 0x79f: 0xa6,
+ 0x7a0: 0xa6, 0x7a1: 0xa6, 0x7a2: 0xa6, 0x7a3: 0xa6, 0x7a4: 0xa6, 0x7a5: 0xa6, 0x7a6: 0xa6, 0x7a7: 0xa6,
+ 0x7a8: 0xa6, 0x7a9: 0xa6, 0x7aa: 0xa6, 0x7ab: 0xa6, 0x7ac: 0xa6, 0x7ad: 0xa6, 0x7ae: 0xa6, 0x7af: 0x1ad,
+ 0x7b0: 0x100, 0x7b1: 0x100, 0x7b2: 0x100, 0x7b3: 0x100, 0x7b4: 0x100, 0x7b5: 0x100, 0x7b6: 0x100, 0x7b7: 0x100,
+ 0x7b8: 0x100, 0x7b9: 0x100, 0x7ba: 0x100, 0x7bb: 0x100, 0x7bc: 0x100, 0x7bd: 0x100, 0x7be: 0x100, 0x7bf: 0x100,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x100, 0x7c1: 0x100, 0x7c2: 0x100, 0x7c3: 0x100, 0x7c4: 0x100, 0x7c5: 0x100, 0x7c6: 0x100, 0x7c7: 0x100,
+ 0x7c8: 0x100, 0x7c9: 0x100, 0x7ca: 0x100, 0x7cb: 0x100, 0x7cc: 0x100, 0x7cd: 0x100, 0x7ce: 0x100, 0x7cf: 0x100,
+ 0x7d0: 0x100, 0x7d1: 0x100, 0x7d2: 0x100, 0x7d3: 0x100, 0x7d4: 0x100, 0x7d5: 0x100, 0x7d6: 0x100, 0x7d7: 0x100,
+ 0x7d8: 0x100, 0x7d9: 0x100, 0x7da: 0x100, 0x7db: 0x100, 0x7dc: 0x100, 0x7dd: 0x100, 0x7de: 0x100, 0x7df: 0x100,
+ 0x7e0: 0x7c, 0x7e1: 0x7d, 0x7e2: 0x7e, 0x7e3: 0x7f, 0x7e4: 0x80, 0x7e5: 0x81, 0x7e6: 0x82, 0x7e7: 0x83,
+ 0x7e8: 0x84, 0x7e9: 0x100, 0x7ea: 0x100, 0x7eb: 0x100, 0x7ec: 0x100, 0x7ed: 0x100, 0x7ee: 0x100, 0x7ef: 0x100,
+ 0x7f0: 0x100, 0x7f1: 0x100, 0x7f2: 0x100, 0x7f3: 0x100, 0x7f4: 0x100, 0x7f5: 0x100, 0x7f6: 0x100, 0x7f7: 0x100,
+ 0x7f8: 0x100, 0x7f9: 0x100, 0x7fa: 0x100, 0x7fb: 0x100, 0x7fc: 0x100, 0x7fd: 0x100, 0x7fe: 0x100, 0x7ff: 0x100,
+ // Block 0x20, offset 0x800
+ 0x800: 0xa6, 0x801: 0xa6, 0x802: 0xa6, 0x803: 0xa6, 0x804: 0xa6, 0x805: 0xa6, 0x806: 0xa6, 0x807: 0xa6,
+ 0x808: 0xa6, 0x809: 0xa6, 0x80a: 0xa6, 0x80b: 0xa6, 0x80c: 0xa6, 0x80d: 0x1ae, 0x80e: 0xa6, 0x80f: 0xa6,
+ 0x810: 0xa6, 0x811: 0xa6, 0x812: 0xa6, 0x813: 0xa6, 0x814: 0xa6, 0x815: 0xa6, 0x816: 0xa6, 0x817: 0xa6,
+ 0x818: 0xa6, 0x819: 0xa6, 0x81a: 0xa6, 0x81b: 0xa6, 0x81c: 0xa6, 0x81d: 0xa6, 0x81e: 0xa6, 0x81f: 0xa6,
+ 0x820: 0xa6, 0x821: 0xa6, 0x822: 0xa6, 0x823: 0xa6, 0x824: 0xa6, 0x825: 0xa6, 0x826: 0xa6, 0x827: 0xa6,
+ 0x828: 0xa6, 0x829: 0xa6, 0x82a: 0xa6, 0x82b: 0xa6, 0x82c: 0xa6, 0x82d: 0xa6, 0x82e: 0xa6, 0x82f: 0xa6,
+ 0x830: 0xa6, 0x831: 0xa6, 0x832: 0xa6, 0x833: 0xa6, 0x834: 0xa6, 0x835: 0xa6, 0x836: 0xa6, 0x837: 0xa6,
+ 0x838: 0xa6, 0x839: 0xa6, 0x83a: 0xa6, 0x83b: 0xa6, 0x83c: 0xa6, 0x83d: 0xa6, 0x83e: 0xa6, 0x83f: 0xa6,
+ // Block 0x21, offset 0x840
+ 0x840: 0xa6, 0x841: 0xa6, 0x842: 0xa6, 0x843: 0xa6, 0x844: 0xa6, 0x845: 0xa6, 0x846: 0xa6, 0x847: 0xa6,
+ 0x848: 0xa6, 0x849: 0xa6, 0x84a: 0xa6, 0x84b: 0xa6, 0x84c: 0xa6, 0x84d: 0xa6, 0x84e: 0x1af, 0x84f: 0x100,
+ 0x850: 0x100, 0x851: 0x100, 0x852: 0x100, 0x853: 0x100, 0x854: 0x100, 0x855: 0x100, 0x856: 0x100, 0x857: 0x100,
+ 0x858: 0x100, 0x859: 0x100, 0x85a: 0x100, 0x85b: 0x100, 0x85c: 0x100, 0x85d: 0x100, 0x85e: 0x100, 0x85f: 0x100,
+ 0x860: 0x100, 0x861: 0x100, 0x862: 0x100, 0x863: 0x100, 0x864: 0x100, 0x865: 0x100, 0x866: 0x100, 0x867: 0x100,
+ 0x868: 0x100, 0x869: 0x100, 0x86a: 0x100, 0x86b: 0x100, 0x86c: 0x100, 0x86d: 0x100, 0x86e: 0x100, 0x86f: 0x100,
+ 0x870: 0x100, 0x871: 0x100, 0x872: 0x100, 0x873: 0x100, 0x874: 0x100, 0x875: 0x100, 0x876: 0x100, 0x877: 0x100,
+ 0x878: 0x100, 0x879: 0x100, 0x87a: 0x100, 0x87b: 0x100, 0x87c: 0x100, 0x87d: 0x100, 0x87e: 0x100, 0x87f: 0x100,
+ // Block 0x22, offset 0x880
+ 0x890: 0x0c, 0x891: 0x0d, 0x892: 0x0e, 0x893: 0x0f, 0x894: 0x10, 0x895: 0x0a, 0x896: 0x11, 0x897: 0x07,
+ 0x898: 0x12, 0x899: 0x0a, 0x89a: 0x13, 0x89b: 0x14, 0x89c: 0x15, 0x89d: 0x16, 0x89e: 0x17, 0x89f: 0x18,
+ 0x8a0: 0x07, 0x8a1: 0x07, 0x8a2: 0x07, 0x8a3: 0x07, 0x8a4: 0x07, 0x8a5: 0x07, 0x8a6: 0x07, 0x8a7: 0x07,
+ 0x8a8: 0x07, 0x8a9: 0x07, 0x8aa: 0x19, 0x8ab: 0x1a, 0x8ac: 0x1b, 0x8ad: 0x07, 0x8ae: 0x1c, 0x8af: 0x1d,
+ 0x8b0: 0x07, 0x8b1: 0x1e, 0x8b2: 0x1f, 0x8b3: 0x0a, 0x8b4: 0x0a, 0x8b5: 0x0a, 0x8b6: 0x0a, 0x8b7: 0x0a,
+ 0x8b8: 0x0a, 0x8b9: 0x0a, 0x8ba: 0x0a, 0x8bb: 0x0a, 0x8bc: 0x0a, 0x8bd: 0x0a, 0x8be: 0x0a, 0x8bf: 0x0a,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0a, 0x8c1: 0x0a, 0x8c2: 0x0a, 0x8c3: 0x0a, 0x8c4: 0x0a, 0x8c5: 0x0a, 0x8c6: 0x0a, 0x8c7: 0x0a,
+ 0x8c8: 0x0a, 0x8c9: 0x0a, 0x8ca: 0x0a, 0x8cb: 0x0a, 0x8cc: 0x0a, 0x8cd: 0x0a, 0x8ce: 0x0a, 0x8cf: 0x0a,
+ 0x8d0: 0x0a, 0x8d1: 0x0a, 0x8d2: 0x0a, 0x8d3: 0x0a, 0x8d4: 0x0a, 0x8d5: 0x0a, 0x8d6: 0x0a, 0x8d7: 0x0a,
+ 0x8d8: 0x0a, 0x8d9: 0x0a, 0x8da: 0x0a, 0x8db: 0x0a, 0x8dc: 0x0a, 0x8dd: 0x0a, 0x8de: 0x0a, 0x8df: 0x0a,
+ 0x8e0: 0x0a, 0x8e1: 0x0a, 0x8e2: 0x0a, 0x8e3: 0x0a, 0x8e4: 0x0a, 0x8e5: 0x0a, 0x8e6: 0x0a, 0x8e7: 0x0a,
+ 0x8e8: 0x0a, 0x8e9: 0x0a, 0x8ea: 0x0a, 0x8eb: 0x0a, 0x8ec: 0x0a, 0x8ed: 0x0a, 0x8ee: 0x0a, 0x8ef: 0x0a,
+ 0x8f0: 0x0a, 0x8f1: 0x0a, 0x8f2: 0x0a, 0x8f3: 0x0a, 0x8f4: 0x0a, 0x8f5: 0x0a, 0x8f6: 0x0a, 0x8f7: 0x0a,
+ 0x8f8: 0x0a, 0x8f9: 0x0a, 0x8fa: 0x0a, 0x8fb: 0x0a, 0x8fc: 0x0a, 0x8fd: 0x0a, 0x8fe: 0x0a, 0x8ff: 0x0a,
+ // Block 0x24, offset 0x900
+ 0x900: 0x1b0, 0x901: 0x1b1, 0x902: 0x100, 0x903: 0x100, 0x904: 0x1b2, 0x905: 0x1b2, 0x906: 0x1b2, 0x907: 0x1b3,
+ 0x908: 0x100, 0x909: 0x100, 0x90a: 0x100, 0x90b: 0x100, 0x90c: 0x100, 0x90d: 0x100, 0x90e: 0x100, 0x90f: 0x100,
+ 0x910: 0x100, 0x911: 0x100, 0x912: 0x100, 0x913: 0x100, 0x914: 0x100, 0x915: 0x100, 0x916: 0x100, 0x917: 0x100,
+ 0x918: 0x100, 0x919: 0x100, 0x91a: 0x100, 0x91b: 0x100, 0x91c: 0x100, 0x91d: 0x100, 0x91e: 0x100, 0x91f: 0x100,
+ 0x920: 0x100, 0x921: 0x100, 0x922: 0x100, 0x923: 0x100, 0x924: 0x100, 0x925: 0x100, 0x926: 0x100, 0x927: 0x100,
+ 0x928: 0x100, 0x929: 0x100, 0x92a: 0x100, 0x92b: 0x100, 0x92c: 0x100, 0x92d: 0x100, 0x92e: 0x100, 0x92f: 0x100,
+ 0x930: 0x100, 0x931: 0x100, 0x932: 0x100, 0x933: 0x100, 0x934: 0x100, 0x935: 0x100, 0x936: 0x100, 0x937: 0x100,
+ 0x938: 0x100, 0x939: 0x100, 0x93a: 0x100, 0x93b: 0x100, 0x93c: 0x100, 0x93d: 0x100, 0x93e: 0x100, 0x93f: 0x100,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0a, 0x941: 0x0a, 0x942: 0x0a, 0x943: 0x0a, 0x944: 0x0a, 0x945: 0x0a, 0x946: 0x0a, 0x947: 0x0a,
+ 0x948: 0x0a, 0x949: 0x0a, 0x94a: 0x0a, 0x94b: 0x0a, 0x94c: 0x0a, 0x94d: 0x0a, 0x94e: 0x0a, 0x94f: 0x0a,
+ 0x950: 0x0a, 0x951: 0x0a, 0x952: 0x0a, 0x953: 0x0a, 0x954: 0x0a, 0x955: 0x0a, 0x956: 0x0a, 0x957: 0x0a,
+ 0x958: 0x0a, 0x959: 0x0a, 0x95a: 0x0a, 0x95b: 0x0a, 0x95c: 0x0a, 0x95d: 0x0a, 0x95e: 0x0a, 0x95f: 0x0a,
+ 0x960: 0x22, 0x961: 0x0a, 0x962: 0x0a, 0x963: 0x0a, 0x964: 0x0a, 0x965: 0x0a, 0x966: 0x0a, 0x967: 0x0a,
+ 0x968: 0x0a, 0x969: 0x0a, 0x96a: 0x0a, 0x96b: 0x0a, 0x96c: 0x0a, 0x96d: 0x0a, 0x96e: 0x0a, 0x96f: 0x0a,
+ 0x970: 0x0a, 0x971: 0x0a, 0x972: 0x0a, 0x973: 0x0a, 0x974: 0x0a, 0x975: 0x0a, 0x976: 0x0a, 0x977: 0x0a,
+ 0x978: 0x0a, 0x979: 0x0a, 0x97a: 0x0a, 0x97b: 0x0a, 0x97c: 0x0a, 0x97d: 0x0a, 0x97e: 0x0a, 0x97f: 0x0a,
+ // Block 0x26, offset 0x980
+ 0x980: 0x0a, 0x981: 0x0a, 0x982: 0x0a, 0x983: 0x0a, 0x984: 0x0a, 0x985: 0x0a, 0x986: 0x0a, 0x987: 0x0a,
+ 0x988: 0x0a, 0x989: 0x0a, 0x98a: 0x0a, 0x98b: 0x0a, 0x98c: 0x0a, 0x98d: 0x0a, 0x98e: 0x0a, 0x98f: 0x0a,
+}
+
+// idnaSparseOffset: 303 entries, 606 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x7e, 0x87, 0x97, 0xa6, 0xb1, 0xbe, 0xcf, 0xd9, 0xe0, 0xed, 0xfe, 0x105, 0x110, 0x11f, 0x12d, 0x137, 0x139, 0x13e, 0x141, 0x144, 0x146, 0x152, 0x15d, 0x165, 0x16b, 0x171, 0x176, 0x17b, 0x17e, 0x182, 0x188, 0x18d, 0x198, 0x1a2, 0x1a8, 0x1b9, 0x1c4, 0x1c7, 0x1cf, 0x1d2, 0x1df, 0x1e7, 0x1eb, 0x1f2, 0x1fa, 0x20a, 0x216, 0x219, 0x223, 0x22f, 0x23b, 0x247, 0x24f, 0x254, 0x261, 0x272, 0x27d, 0x282, 0x28b, 0x293, 0x299, 0x29e, 0x2a1, 0x2a5, 0x2ab, 0x2af, 0x2b3, 0x2b7, 0x2bc, 0x2c4, 0x2cb, 0x2d6, 0x2e0, 0x2e4, 0x2e7, 0x2ed, 0x2f1, 0x2f3, 0x2f6, 0x2f8, 0x2fb, 0x305, 0x308, 0x317, 0x31b, 0x31f, 0x321, 0x32a, 0x32e, 0x333, 0x338, 0x33e, 0x34e, 0x354, 0x358, 0x367, 0x36c, 0x374, 0x37e, 0x389, 0x391, 0x3a2, 0x3ab, 0x3bb, 0x3c8, 0x3d4, 0x3d9, 0x3e6, 0x3ea, 0x3ef, 0x3f1, 0x3f3, 0x3f7, 0x3f9, 0x3fd, 0x406, 0x40c, 0x410, 0x420, 0x42a, 0x42f, 0x432, 0x438, 0x43f, 0x444, 0x448, 0x44e, 0x453, 0x45c, 0x461, 0x467, 0x46e, 0x475, 0x47c, 0x480, 0x483, 0x488, 0x494, 0x49a, 0x49f, 0x4a6, 0x4ae, 0x4b3, 0x4b7, 0x4c7, 0x4ce, 0x4d2, 0x4d6, 0x4dd, 0x4df, 0x4e2, 0x4e5, 0x4e9, 0x4f2, 0x4f6, 0x4fe, 0x501, 0x509, 0x514, 0x523, 0x52f, 0x535, 0x542, 0x54e, 0x556, 0x55f, 0x56a, 0x571, 0x580, 0x58d, 0x591, 0x59e, 0x5a7, 0x5ab, 0x5ba, 0x5c2, 0x5cd, 0x5d6, 0x5dc, 0x5e4, 0x5ed, 0x5f9, 0x5fc, 0x608, 0x60b, 0x614, 0x617, 0x61c, 0x625, 0x62a, 0x637, 0x642, 0x64b, 0x656, 0x659, 0x65c, 0x666, 0x66f, 0x67b, 0x688, 0x695, 0x6a3, 0x6aa, 0x6b5, 0x6bc, 0x6c0, 0x6c4, 0x6c7, 0x6cc, 0x6cf, 0x6d2, 0x6d6, 0x6d9, 0x6de, 0x6e5, 0x6e8, 0x6f0, 0x6f4, 0x6ff, 0x702, 0x705, 0x708, 0x70e, 0x714, 0x71d, 0x720, 0x723, 0x726, 0x72e, 0x733, 0x73c, 0x73f, 0x744, 0x74e, 0x752, 0x756, 0x759, 0x75c, 0x760, 0x76f, 0x77b, 0x77f, 0x784, 0x789, 0x78e, 0x792, 0x797, 0x7a0, 0x7a5, 0x7a9, 0x7af, 0x7b5, 0x7ba, 0x7c0, 0x7c6, 0x7d0, 0x7d6, 0x7df, 0x7e2, 0x7e5, 0x7e9, 0x7ed, 0x7f1, 0x7f7, 0x7fd, 0x802, 0x805, 0x815, 0x81c, 0x820, 0x827, 0x82b, 0x831, 0x838, 0x83f, 0x845, 0x84e, 0x852, 0x860, 0x863, 0x866, 0x86a, 0x86e, 0x871, 0x875, 0x878, 0x87d, 0x87f, 0x881}
+
+// idnaSparseValues: 2180 entries, 8720 bytes
+var idnaSparseValues = [2180]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x00a9, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x00b1, lo: 0xb2, hi: 0xb2},
+ {value: 0x00b9, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x00c1, lo: 0xb7, hi: 0xb7},
+ {value: 0x00c9, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0131, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0a08, lo: 0x80, hi: 0x88},
+ {value: 0x0808, lo: 0x89, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xe, offset 0x7e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0xf, offset 0x87
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x10, offset 0x97
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x11, offset 0xa6
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x14, offset 0xcf
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x01f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x15, offset 0xd9
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x16, offset 0xe0
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0201, lo: 0x9c, hi: 0x9c},
+ {value: 0x0209, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x17, offset 0xed
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x18, offset 0xfe
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x19, offset 0x105
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1a, offset 0x110
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1b, offset 0x11f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1c, offset 0x12d
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1d, offset 0x137
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1e, offset 0x139
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x1f, offset 0x13e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x20, offset 0x141
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x21, offset 0x144
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x22, offset 0x146
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x24, offset 0x15d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x25, offset 0x165
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x16b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x27, offset 0x171
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x28, offset 0x176
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x29, offset 0x17b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2a, offset 0x17e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2b, offset 0x182
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2c, offset 0x188
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2d, offset 0x18d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x3808, lo: 0x95, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3808, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2e, offset 0x198
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x2f, offset 0x1a2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x30, offset 0x1a8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x31, offset 0x1b9
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x33c0, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x32, offset 0x1c4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x33, offset 0x1c7
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x34, offset 0x1cf
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x35, offset 0x1d2
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x36, offset 0x1df
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x37, offset 0x1e7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x38, offset 0x1eb
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x39, offset 0x1f2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3a, offset 0x1fa
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3b, offset 0x20a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x02},
+ {value: 0x3308, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0xbf},
+ // Block 0x3d, offset 0x219
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3e, offset 0x223
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3f, offset 0x22f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x40, offset 0x23b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x41, offset 0x247
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x42, offset 0x24f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x43, offset 0x254
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x02a9, lo: 0x80, hi: 0x80},
+ {value: 0x02b1, lo: 0x81, hi: 0x81},
+ {value: 0x02b9, lo: 0x82, hi: 0x82},
+ {value: 0x02c1, lo: 0x83, hi: 0x83},
+ {value: 0x02c9, lo: 0x84, hi: 0x85},
+ {value: 0x02d1, lo: 0x86, hi: 0x86},
+ {value: 0x02d9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x059d, lo: 0x90, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x059d, lo: 0xbd, hi: 0xbf},
+ // Block 0x44, offset 0x261
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x45, offset 0x272
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x46, offset 0x27d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x47, offset 0x282
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x0851, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x48, offset 0x28b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0859, lo: 0xac, hi: 0xac},
+ {value: 0x0861, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x0869, lo: 0xaf, hi: 0xaf},
+ {value: 0x0871, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x293
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4a, offset 0x299
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09dd, lo: 0xa9, hi: 0xa9},
+ {value: 0x09fd, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4b, offset 0x29e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4c, offset 0x2a1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0929, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4d, offset 0x2a5
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e7e, lo: 0xb4, hi: 0xb4},
+ {value: 0x0932, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e9e, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ab
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x0939, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x4f, offset 0x2af
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x50, offset 0x2b3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0xbf},
+ // Block 0x51, offset 0x2b7
+ {value: 0x0000, lo: 0x04},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ebd, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x52, offset 0x2bc
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x53, offset 0x2c4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x54, offset 0x2cb
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x55, offset 0x2d6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x56, offset 0x2e0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x57, offset 0x2e4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x58, offset 0x2e7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0ef5, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2ed
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0f15, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5a, offset 0x2f1
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f35, lo: 0x80, hi: 0xbf},
+ // Block 0x5b, offset 0x2f3
+ {value: 0x0020, lo: 0x02},
+ {value: 0x1735, lo: 0x80, hi: 0x8f},
+ {value: 0x1915, lo: 0x90, hi: 0xbf},
+ // Block 0x5c, offset 0x2f6
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1f15, lo: 0x80, hi: 0xbf},
+ // Block 0x5d, offset 0x2f8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x5e, offset 0x2fb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x096a, lo: 0x9b, hi: 0x9b},
+ {value: 0x0972, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x0979, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x5f, offset 0x305
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x0981, lo: 0xbf, hi: 0xbf},
+ // Block 0x60, offset 0x308
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a35, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a55, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a75, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a95, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a75, lo: 0xb5, hi: 0xb5},
+ {value: 0x2ab5, lo: 0xb6, hi: 0xb6},
+ {value: 0x2ad5, lo: 0xb7, hi: 0xb7},
+ {value: 0x2af5, lo: 0xb8, hi: 0xb9},
+ {value: 0x2b15, lo: 0xba, hi: 0xbb},
+ {value: 0x2b35, lo: 0xbc, hi: 0xbd},
+ {value: 0x2b15, lo: 0xbe, hi: 0xbf},
+ // Block 0x61, offset 0x317
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x62, offset 0x31b
+ {value: 0x0008, lo: 0x03},
+ {value: 0x098a, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0a82, lo: 0xa0, hi: 0xbf},
+ // Block 0x63, offset 0x31f
+ {value: 0x0008, lo: 0x01},
+ {value: 0x0d19, lo: 0x80, hi: 0xbf},
+ // Block 0x64, offset 0x321
+ {value: 0x0008, lo: 0x08},
+ {value: 0x0f19, lo: 0x80, hi: 0xb0},
+ {value: 0x4045, lo: 0xb1, hi: 0xb1},
+ {value: 0x10a1, lo: 0xb2, hi: 0xb3},
+ {value: 0x4065, lo: 0xb4, hi: 0xb4},
+ {value: 0x10b1, lo: 0xb5, hi: 0xb7},
+ {value: 0x4085, lo: 0xb8, hi: 0xb8},
+ {value: 0x4085, lo: 0xb9, hi: 0xb9},
+ {value: 0x10c9, lo: 0xba, hi: 0xbf},
+ // Block 0x65, offset 0x32a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x66, offset 0x32e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x67, offset 0x333
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x68, offset 0x338
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x69, offset 0x33e
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x3b08, lo: 0xac, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6a, offset 0x34e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x354
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6c, offset 0x358
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x6d, offset 0x367
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x6e, offset 0x36c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x6f, offset 0x374
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x70, offset 0x37e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x71, offset 0x389
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x72, offset 0x391
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x73, offset 0x3a2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x74, offset 0x3ab
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3bb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x76, offset 0x3c8
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x449d, lo: 0x9c, hi: 0x9c},
+ {value: 0x44b5, lo: 0x9d, hi: 0x9d},
+ {value: 0x0941, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa8},
+ {value: 0x13f9, lo: 0xa9, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x44cd, lo: 0xb0, hi: 0xbf},
+ // Block 0x77, offset 0x3d4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44ed, lo: 0x80, hi: 0x8f},
+ {value: 0x450d, lo: 0x90, hi: 0x9f},
+ {value: 0x452d, lo: 0xa0, hi: 0xaf},
+ {value: 0x450d, lo: 0xb0, hi: 0xbf},
+ // Block 0x78, offset 0x3d9
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x79, offset 0x3e6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3ea
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7b, offset 0x3ef
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x7c, offset 0x3f1
+ {value: 0x0020, lo: 0x01},
+ {value: 0x454d, lo: 0x80, hi: 0xbf},
+ // Block 0x7d, offset 0x3f3
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d4d, lo: 0x80, hi: 0x94},
+ {value: 0x4b0d, lo: 0x95, hi: 0x95},
+ {value: 0x4fed, lo: 0x96, hi: 0xbf},
+ // Block 0x7e, offset 0x3f7
+ {value: 0x0020, lo: 0x01},
+ {value: 0x552d, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x3f9
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5d2d, lo: 0x80, hi: 0x84},
+ {value: 0x568d, lo: 0x85, hi: 0x85},
+ {value: 0x5dcd, lo: 0x86, hi: 0xbf},
+ // Block 0x80, offset 0x3fd
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b8d, lo: 0x80, hi: 0x8f},
+ {value: 0x6d4d, lo: 0x90, hi: 0x90},
+ {value: 0x6d8d, lo: 0x91, hi: 0xab},
+ {value: 0x1401, lo: 0xac, hi: 0xac},
+ {value: 0x70ed, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x710d, lo: 0xb0, hi: 0xbf},
+ // Block 0x81, offset 0x406
+ {value: 0x0020, lo: 0x05},
+ {value: 0x730d, lo: 0x80, hi: 0xad},
+ {value: 0x656d, lo: 0xae, hi: 0xae},
+ {value: 0x78cd, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f8d, lo: 0xb6, hi: 0xb6},
+ {value: 0x79ad, lo: 0xb7, hi: 0xbf},
+ // Block 0x82, offset 0x40c
+ {value: 0x0008, lo: 0x03},
+ {value: 0x1751, lo: 0x80, hi: 0x82},
+ {value: 0x1741, lo: 0x83, hi: 0x83},
+ {value: 0x1769, lo: 0x84, hi: 0xbf},
+ // Block 0x83, offset 0x410
+ {value: 0x0008, lo: 0x0f},
+ {value: 0x1d81, lo: 0x80, hi: 0x83},
+ {value: 0x1d99, lo: 0x84, hi: 0x85},
+ {value: 0x1da1, lo: 0x86, hi: 0x87},
+ {value: 0x1da9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x1de9, lo: 0x92, hi: 0x97},
+ {value: 0x1e11, lo: 0x98, hi: 0x9c},
+ {value: 0x1e31, lo: 0x9d, hi: 0xb3},
+ {value: 0x1d71, lo: 0xb4, hi: 0xb4},
+ {value: 0x1d81, lo: 0xb5, hi: 0xb5},
+ {value: 0x1ee9, lo: 0xb6, hi: 0xbb},
+ {value: 0x1f09, lo: 0xbc, hi: 0xbc},
+ {value: 0x1ef9, lo: 0xbd, hi: 0xbd},
+ {value: 0x1f19, lo: 0xbe, hi: 0xbf},
+ // Block 0x84, offset 0x420
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x85, offset 0x42a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x86, offset 0x42f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x87, offset 0x432
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x88, offset 0x438
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x89, offset 0x43f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8a, offset 0x444
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8b, offset 0x448
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8c, offset 0x44e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8d, offset 0x453
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8e, offset 0x45c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8f, offset 0x461
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x90, offset 0x467
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8b0d, lo: 0x98, hi: 0x9f},
+ {value: 0x8b25, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x91, offset 0x46e
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8b25, lo: 0xb0, hi: 0xb7},
+ {value: 0x8b0d, lo: 0xb8, hi: 0xbf},
+ // Block 0x92, offset 0x475
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x93, offset 0x47c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x94, offset 0x480
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x95, offset 0x483
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x96, offset 0x488
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x97, offset 0x494
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x49a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x49f
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9a, offset 0x4a6
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9b, offset 0x4ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9c, offset 0x4b3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0x9d, offset 0x4b7
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4c7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0x9f, offset 0x4ce
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa0, offset 0x4d2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa1, offset 0x4d6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa2, offset 0x4dd
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa3, offset 0x4df
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa4, offset 0x4e2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa5, offset 0x4e5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa6, offset 0x4e9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xa7, offset 0x4f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xa8, offset 0x4f6
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xac},
+ {value: 0x0818, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xa9, offset 0x4fe
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbf},
+ // Block 0xaa, offset 0x501
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xab, offset 0x509
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb3},
+ {value: 0x0c08, lo: 0xb4, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xbf},
+ // Block 0xac, offset 0x514
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0a08, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb3},
+ {value: 0x0c08, lo: 0xb4, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb7},
+ {value: 0x0a08, lo: 0xb8, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xba},
+ {value: 0x0a08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0c08, lo: 0xbd, hi: 0xbd},
+ {value: 0x0a08, lo: 0xbe, hi: 0xbf},
+ // Block 0xad, offset 0x523
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0x81},
+ {value: 0x0c08, lo: 0x82, hi: 0x83},
+ {value: 0x0a08, lo: 0x84, hi: 0x84},
+ {value: 0x0818, lo: 0x85, hi: 0x88},
+ {value: 0x0c18, lo: 0x89, hi: 0x89},
+ {value: 0x0a18, lo: 0x8a, hi: 0x8a},
+ {value: 0x0918, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xae, offset 0x52f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xaf, offset 0x535
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x3b08, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xb0, offset 0x542
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb1, offset 0x54e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb2, offset 0x556
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb3, offset 0x55f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb4, offset 0x56a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb5, offset 0x571
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x3008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb6, offset 0x580
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb7, offset 0x58d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0xbf},
+ // Block 0xb8, offset 0x591
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb9, offset 0x59e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xba, offset 0x5a7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xbb, offset 0x5ab
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xbc, offset 0x5ba
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5c2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbe, offset 0x5cd
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5d6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xc0, offset 0x5dc
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc1, offset 0x5e4
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc2, offset 0x5ed
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5f9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc4, offset 0x5fc
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc5, offset 0x608
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xc6, offset 0x60b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc7, offset 0x614
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc8, offset 0x617
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc9, offset 0x61c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xca, offset 0x625
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xcb, offset 0x62a
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x99},
+ {value: 0x3308, lo: 0x9a, hi: 0x9b},
+ {value: 0x3008, lo: 0x9c, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xbf},
+ // Block 0xcc, offset 0x637
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xcd, offset 0x642
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xce, offset 0x64b
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xcf, offset 0x656
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd0, offset 0x659
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xd1, offset 0x65c
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xd2, offset 0x666
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xd3, offset 0x66f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xd4, offset 0x67b
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xd5, offset 0x688
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xd6, offset 0x695
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd7, offset 0x6a3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd8, offset 0x6aa
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0xd9, offset 0x6b5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3808, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xda, offset 0x6bc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0xdb, offset 0x6c0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xdc, offset 0x6c4
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xdd, offset 0x6c7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xde, offset 0x6cc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xdf, offset 0x6cf
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbf},
+ // Block 0xe0, offset 0x6d2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe1, offset 0x6d6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0340, lo: 0xb0, hi: 0xbf},
+ // Block 0xe2, offset 0x6d9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0xe3, offset 0x6de
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x6e5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xe5, offset 0x6e8
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe6, offset 0x6f0
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xe7, offset 0x6f4
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xe8, offset 0x6ff
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xe9, offset 0x702
+ {value: 0x0000, lo: 0x02},
+ {value: 0xe105, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xea, offset 0x705
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xeb, offset 0x708
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbf},
+ // Block 0xec, offset 0x70e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xed, offset 0x714
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xee, offset 0x71d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xef, offset 0x720
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0xf0, offset 0x723
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xf1, offset 0x726
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xf2, offset 0x72e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xf3, offset 0x733
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x94},
+ {value: 0x0008, lo: 0x95, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xa3},
+ {value: 0x0008, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xf4, offset 0x73c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xf5, offset 0x73f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xf6, offset 0x744
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xf7, offset 0x74e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbf},
+ // Block 0xf8, offset 0x752
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf9, offset 0x756
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xfa, offset 0x759
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xfb, offset 0x75c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xfc, offset 0x760
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0x2379, lo: 0x9e, hi: 0x9e},
+ {value: 0x2381, lo: 0x9f, hi: 0x9f},
+ {value: 0x2389, lo: 0xa0, hi: 0xa0},
+ {value: 0x2391, lo: 0xa1, hi: 0xa1},
+ {value: 0x2399, lo: 0xa2, hi: 0xa2},
+ {value: 0x23a1, lo: 0xa3, hi: 0xa3},
+ {value: 0x23a9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xfd, offset 0x76f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0x23b1, lo: 0xbb, hi: 0xbb},
+ {value: 0x23b9, lo: 0xbc, hi: 0xbc},
+ {value: 0x23c1, lo: 0xbd, hi: 0xbd},
+ {value: 0x23c9, lo: 0xbe, hi: 0xbe},
+ {value: 0x23d1, lo: 0xbf, hi: 0xbf},
+ // Block 0xfe, offset 0x77b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x23d9, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xff, offset 0x77f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0x100, offset 0x784
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x101, offset 0x789
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x102, offset 0x78e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x103, offset 0x792
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x104, offset 0x797
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x105, offset 0x7a0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0x106, offset 0x7a5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x107, offset 0x7a9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x108, offset 0x7af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x109, offset 0x7b5
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x3308, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0x10a, offset 0x7ba
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x10b, offset 0x7c0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x10c, offset 0x7c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x10d, offset 0x7d0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x10e, offset 0x7d6
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0b08, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x10f, offset 0x7df
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0x110, offset 0x7e2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x111, offset 0x7e5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0818, lo: 0x81, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x112, offset 0x7e9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0x113, offset 0x7ed
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x114, offset 0x7f1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x115, offset 0x7f7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x116, offset 0x7fd
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0x2709, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x117, offset 0x802
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0x118, offset 0x805
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x2889, lo: 0x80, hi: 0x80},
+ {value: 0x2891, lo: 0x81, hi: 0x81},
+ {value: 0x2899, lo: 0x82, hi: 0x82},
+ {value: 0x28a1, lo: 0x83, hi: 0x83},
+ {value: 0x28a9, lo: 0x84, hi: 0x84},
+ {value: 0x28b1, lo: 0x85, hi: 0x85},
+ {value: 0x28b9, lo: 0x86, hi: 0x86},
+ {value: 0x28c1, lo: 0x87, hi: 0x87},
+ {value: 0x28c9, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x28d1, lo: 0x90, hi: 0x90},
+ {value: 0x28d9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0x119, offset 0x815
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x11a, offset 0x81c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x11b, offset 0x820
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x11c, offset 0x827
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x11d, offset 0x82b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x11e, offset 0x831
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0x11f, offset 0x838
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x120, offset 0x83f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x121, offset 0x845
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x122, offset 0x84e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0xbf},
+ // Block 0x123, offset 0x852
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0018, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0xaf},
+ {value: 0x06e1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0049, lo: 0xb1, hi: 0xb1},
+ {value: 0x0029, lo: 0xb2, hi: 0xb2},
+ {value: 0x0031, lo: 0xb3, hi: 0xb3},
+ {value: 0x06e9, lo: 0xb4, hi: 0xb4},
+ {value: 0x06f1, lo: 0xb5, hi: 0xb5},
+ {value: 0x06f9, lo: 0xb6, hi: 0xb6},
+ {value: 0x0701, lo: 0xb7, hi: 0xb7},
+ {value: 0x0709, lo: 0xb8, hi: 0xb8},
+ {value: 0x0711, lo: 0xb9, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x124, offset 0x860
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x125, offset 0x863
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x126, offset 0x866
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x127, offset 0x86a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x128, offset 0x86e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x129, offset 0x871
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbf},
+ // Block 0x12a, offset 0x875
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x12b, offset 0x878
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x12c, offset 0x87d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x12d, offset 0x87f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x12e, offset 0x881
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 46723 bytes (45KiB); checksum: 4CF3143A
diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go
new file mode 100644
index 0000000..0f25e84
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables9.0.0.go
@@ -0,0 +1,4486 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+//go:build !go1.10
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "9.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 124:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 124
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 126 blocks, 8064 entries, 16128 bytes
+// The third block is the zero block.
+var idnaValues = [8064]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308,
+ 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008,
+ 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308,
+ 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308,
+ 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1,
+ 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308,
+ 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008,
+ 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008,
+ 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008,
+ 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008,
+ 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008,
+ 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008,
+ 0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040,
+ 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008,
+ 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008,
+ 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008,
+ 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040,
+ 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040,
+ 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008,
+ 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008,
+ 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1,
+ 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308,
+ 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018,
+ 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018,
+ 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008,
+ 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040,
+ 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008,
+ 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008,
+ 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008,
+ 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008,
+ 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040,
+ 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008,
+ // Block 0x19, offset 0x640
+ 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040,
+ 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308,
+ 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308,
+ 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040,
+ 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040,
+ 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040,
+ 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308,
+ 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008,
+ 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008,
+ 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008,
+ 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008,
+ 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008,
+ 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008,
+ 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008,
+ 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308,
+ 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008,
+ 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040,
+ 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040,
+ 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040,
+ 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308,
+ 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040,
+ 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008,
+ 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008,
+ 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008,
+ 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008,
+ 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008,
+ 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008,
+ 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040,
+ 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040,
+ 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008,
+ 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008,
+ 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9,
+ 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308,
+ 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018,
+ 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008,
+ 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040,
+ 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040,
+ 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040,
+ 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040,
+ 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008,
+ 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008,
+ 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040,
+ 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308,
+ 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040,
+ 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040,
+ 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040,
+ 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308,
+ 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040,
+ 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018,
+ 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008,
+ 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008,
+ 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040,
+ 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008,
+ 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008,
+ 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008,
+ 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040,
+ 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308,
+ // Block 0x21, offset 0x840
+ 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040,
+ 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008,
+ 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040,
+ 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040,
+ 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040,
+ 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308,
+ 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040,
+ 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040,
+ 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008,
+ 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018,
+ 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018,
+ 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008,
+ 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040,
+ 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040,
+ 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008,
+ 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008,
+ 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008,
+ 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308,
+ 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308,
+ 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008,
+ 0x90c: 0x0008, 0x90d: 0x0a09, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040,
+ 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59,
+ 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008,
+ // Block 0x25, offset 0x940
+ 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018,
+ 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308,
+ 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308,
+ 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11,
+ 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308,
+ 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308,
+ 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308,
+ 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308,
+ 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018,
+ // Block 0x26, offset 0x980
+ 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008,
+ 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008,
+ 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008,
+ 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008,
+ 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008,
+ 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008,
+ 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008,
+ 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41,
+ 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008,
+ 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1,
+ 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011,
+ 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041,
+ 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9,
+ 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099,
+ 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269,
+ 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1,
+ 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 0x0008,
+ 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008,
+ 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008,
+ 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008,
+ 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008,
+ 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008,
+ 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008,
+ 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169,
+ 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9,
+ 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251,
+ 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9,
+ 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359,
+ 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1,
+ 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008,
+ 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008,
+ 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008,
+ 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008,
+ 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008,
+ 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008,
+ 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008,
+ 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008,
+ 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008,
+ 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008,
+ 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008,
+ 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008,
+ 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008,
+ 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045,
+ 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008,
+ 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045,
+ 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008,
+ 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045,
+ 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045,
+ 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489,
+ 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1,
+ 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1,
+ 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591,
+ 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1,
+ 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1,
+ 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771,
+ 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891,
+ 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831,
+ 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951,
+ 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040,
+ 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459,
+ 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040,
+ 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489,
+ 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008,
+ 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008,
+ 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2,
+ 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61,
+ 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045,
+ 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa,
+ 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9,
+ 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a,
+ 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0,
+ 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d,
+ 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e,
+ 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018,
+ 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018,
+ 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040,
+ 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a,
+ 0xbb0: 0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018,
+ 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018,
+ 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018,
+ 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018,
+ 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340,
+ 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340,
+ 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61,
+ 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd,
+ 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61,
+ 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5,
+ 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09,
+ 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359,
+ 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040,
+ 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018,
+ 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018,
+ 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018,
+ 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018,
+ 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018,
+ 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e,
+ 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249,
+ 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41,
+ 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018,
+ 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269,
+ 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018,
+ 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018,
+ 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09,
+ 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9,
+ 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd,
+ 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9,
+ 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018,
+ 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 0xc90: 0x2121, 0xc91: 0x2151,
+ 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279,
+ 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399,
+ 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439,
+ 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369,
+ 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61,
+ 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451,
+ 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5,
+ 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018,
+ 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040,
+ 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040,
+ 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040,
+ 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040,
+ 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51,
+ 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601,
+ 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691,
+ 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26,
+ 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6,
+ 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a,
+ 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46,
+ 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06,
+ 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6,
+ 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86,
+ 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46,
+ 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199,
+ 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99,
+ 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089,
+ 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9,
+ 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249,
+ 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71,
+ 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9,
+ 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1,
+ 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018,
+ 0xd70: 0x0018, 0xd71: 0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018,
+ 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018,
+ 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008,
+ 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008,
+ 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008,
+ 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008,
+ 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008,
+ 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd,
+ 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d,
+ 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9,
+ 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d,
+ 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008,
+ 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008,
+ 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008,
+ 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008,
+ 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008,
+ 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008,
+ 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008,
+ 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018,
+ 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308,
+ 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040,
+ 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018,
+ 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d,
+ 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d,
+ 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d,
+ 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040,
+ 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040,
+ 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040,
+ 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040,
+ 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040,
+ 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040,
+ 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008,
+ 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018,
+ 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 0xe51: 0x0018,
+ 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018,
+ 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018,
+ 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018,
+ 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018,
+ 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018,
+ 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018,
+ 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018,
+ 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd,
+ 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd,
+ 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d,
+ 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d,
+ 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d,
+ 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd,
+ 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d,
+ 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd,
+ 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d,
+ 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd,
+ 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd,
+ 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d,
+ 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018,
+ 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd,
+ 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d,
+ 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008,
+ 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008,
+ 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008,
+ 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008,
+ 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040,
+ 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd,
+ 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018,
+ 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761,
+ 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1,
+ 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881,
+ 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd,
+ 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d,
+ 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d,
+ 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd,
+ 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d,
+ 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d,
+ 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d,
+ 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd,
+ 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd,
+ 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d,
+ 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d,
+ 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd,
+ 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d,
+ 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999,
+ 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29,
+ 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69,
+ 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69,
+ 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15,
+ 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75,
+ 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded,
+ 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d,
+ 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5,
+ 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d,
+ 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d,
+ 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd,
+ 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9,
+ 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1,
+ 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9,
+ 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549,
+ 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1,
+ 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11,
+ 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91,
+ 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9,
+ 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011,
+ 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209,
+ 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541,
+ 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781,
+ 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 0x1011: 0x5979,
+ 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89,
+ 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1,
+ 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99,
+ 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9,
+ 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9,
+ 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069,
+ 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9,
+ 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271,
+ 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9,
+ 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed,
+ 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371,
+ 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9,
+ 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d,
+ 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211,
+ 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1,
+ 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599,
+ 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9,
+ 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671,
+ 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709,
+ 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781,
+ 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1,
+ 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811,
+ 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901,
+ 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1,
+ 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11,
+ 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31,
+ 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51,
+ 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008,
+ 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008,
+ 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008,
+ 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008,
+ 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008,
+ 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008,
+ 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008,
+ 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308,
+ 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308,
+ 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308,
+ 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11,
+ 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008,
+ 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008,
+ 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008,
+ 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008,
+ 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008,
+ 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018,
+ 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018,
+ 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018,
+ 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008,
+ 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008,
+ 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008,
+ 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008,
+ 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008,
+ 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008,
+ 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008,
+ 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008,
+ 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008,
+ 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008,
+ 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008,
+ 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008,
+ 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d,
+ 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d,
+ 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040,
+ 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008,
+ 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040,
+ 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575,
+ 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635,
+ 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008,
+ 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715,
+ 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5,
+ 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008,
+ 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008,
+ 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935,
+ 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5,
+ 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5,
+ 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35,
+ 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5,
+ 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19,
+ 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91,
+ 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040,
+ 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040,
+ 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040,
+ 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040,
+ 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040,
+ 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040,
+ 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001,
+ 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 0x0040, 0x128a: 0x0040, 0x128b: 0x0040,
+ 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040,
+ 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9,
+ 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1,
+ 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149,
+ 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2,
+ 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1,
+ 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1,
+ 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479,
+ 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040,
+ 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659,
+ 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721,
+ 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751,
+ 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769,
+ 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799,
+ 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1,
+ 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1,
+ 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9,
+ 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829,
+ 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871,
+ 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9,
+ 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9,
+ 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919,
+ 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931,
+ 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961,
+ 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991,
+ 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1,
+ 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818,
+ 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818,
+ 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040,
+ 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040,
+ 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040,
+ 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09,
+ 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 0x135d: 0x0479,
+ 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81,
+ 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1,
+ 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19,
+ 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91,
+ 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1,
+ 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1,
+ 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1,
+ 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1,
+ 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991,
+ 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81,
+ 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a,
+ 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99,
+ 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89,
+ 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79,
+ 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19,
+ 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649,
+ 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9,
+ 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49,
+ 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21,
+ 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9,
+ 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01,
+ 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91,
+ 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9,
+ 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171,
+ 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289,
+ 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1,
+ 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621,
+ 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739,
+ 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1,
+ 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9,
+ 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29,
+ 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079,
+ 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1,
+ 0x1430: 0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171,
+ 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261,
+ 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1,
+ 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1,
+ 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171,
+ 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261,
+ 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351,
+ 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441,
+ 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509,
+ 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1,
+ 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081,
+ 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239,
+ 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040,
+ 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040,
+ 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609,
+ 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721,
+ 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839,
+ 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919,
+ 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9,
+ 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9,
+ 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9,
+ 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1,
+ 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989,
+ 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040,
+ 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040,
+ 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040,
+ 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040,
+ 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040,
+ 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040,
+ 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9,
+ 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12,
+ 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x33c0, 0x1501: 0x33c0, 0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0,
+ 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0,
+ 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55,
+ 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75,
+ 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308,
+ 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308,
+ 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308,
+ 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2,
+ 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35,
+ 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018,
+ 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56,
+ 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95,
+ 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa,
+ 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95,
+ 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99,
+ 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda,
+ 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040,
+ 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040,
+ 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081,
+ 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141,
+ 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171,
+ 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1,
+ 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1,
+ 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201,
+ 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219,
+ 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249,
+ 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291,
+ 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1,
+ 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9,
+ 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321,
+ 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339,
+ 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369,
+ 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381,
+ 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1,
+ 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9,
+ 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9,
+ 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1,
+ 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441,
+ 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9,
+ 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea,
+ 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2,
+ 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9,
+ 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81,
+ 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2,
+ 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159,
+ 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41,
+ 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9,
+ 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9,
+ 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a,
+ 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09,
+ 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51,
+ 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039,
+ 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279,
+ 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a,
+ 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115,
+ 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5,
+ 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295,
+ 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355,
+ 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415,
+ 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515,
+ 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595,
+ 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5,
+ 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655,
+ 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115,
+ 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735,
+ 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 0x16a9: 0x87b5,
+ 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5,
+ 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5,
+ 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5,
+ 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5,
+ 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715,
+ 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040,
+ 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935,
+ 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040,
+ 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6,
+ 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35,
+ 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040,
+ 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040,
+ 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340,
+ 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08,
+ 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808,
+ 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08,
+ 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908,
+ 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08,
+ 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808,
+ 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040,
+ 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18,
+ 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818,
+ 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08,
+ 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08,
+ 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08,
+ 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040,
+ 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040,
+ 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040,
+ 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18,
+ 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818,
+ 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040,
+ 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008,
+ 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008,
+ 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040,
+ 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008,
+ 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008,
+ 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008,
+ 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040,
+ 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008,
+ 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008,
+ 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040,
+ 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008,
+ 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008,
+ 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008,
+ 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308,
+ 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040,
+ 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040,
+ 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040,
+ 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199,
+ 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359,
+ 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269,
+ 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369,
+ 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9,
+ 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259,
+ 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99,
+ 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089,
+ 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9,
+ 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249,
+ 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269,
+ 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369,
+ 0x184c: 0x0289, 0x184d: 0x13d1, 0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9,
+ 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259,
+ 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99,
+ 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089,
+ 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9,
+ 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249,
+ 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71,
+ 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9,
+ 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9,
+ 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259,
+ 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99,
+ 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089,
+ 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040,
+ 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040,
+ 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71,
+ 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9,
+ 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1,
+ 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199,
+ 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99,
+ 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089,
+ 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9,
+ 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249,
+ 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71,
+ 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9,
+ 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1,
+ 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199,
+ 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359,
+ 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269,
+ 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9,
+ 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040,
+ 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71,
+ 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9,
+ 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040,
+ 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199,
+ 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359,
+ 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269,
+ 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369,
+ 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9,
+ 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040,
+ 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9,
+ 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040,
+ 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199,
+ 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359,
+ 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269,
+ 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369,
+ 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9,
+ 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259,
+ 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99,
+ 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1,
+ 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199,
+ 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359,
+ 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269,
+ 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369,
+ 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9,
+ 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259,
+ 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99,
+ 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089,
+ 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9,
+ 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359,
+ 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269,
+ 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369,
+ 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9,
+ 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259,
+ 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99,
+ 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089,
+ 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9,
+ 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 0x19f5: 0x0249,
+ 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71,
+ 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369,
+ 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9,
+ 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259,
+ 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99,
+ 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089,
+ 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9,
+ 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249,
+ 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71,
+ 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9,
+ 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1,
+ 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259,
+ 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99,
+ 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089,
+ 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9,
+ 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249,
+ 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71,
+ 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9,
+ 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1,
+ 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199,
+ 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359,
+ 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089,
+ 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9,
+ 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249,
+ 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71,
+ 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9,
+ 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1,
+ 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099,
+ 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429,
+ 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71,
+ 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9,
+ 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9,
+ 0x1ac6: 0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11,
+ 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109,
+ 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1,
+ 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429,
+ 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099,
+ 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429,
+ 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71,
+ 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9,
+ 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01,
+ 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11,
+ 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109,
+ 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1,
+ 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429,
+ 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099,
+ 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429,
+ 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71,
+ 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9,
+ 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01,
+ 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1,
+ 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109,
+ 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1,
+ 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429,
+ 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099,
+ 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429,
+ 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71,
+ 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9,
+ 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01,
+ 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1,
+ 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41,
+ 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1,
+ 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429,
+ 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099,
+ 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429,
+ 0x1b98: 0x1a31, 0x1b99: 0xbb11, 0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71,
+ 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9,
+ 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01,
+ 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1,
+ 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41,
+ 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1,
+ 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429,
+ 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41,
+ 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079,
+ 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1,
+ 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61,
+ 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9,
+ 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81,
+ 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079,
+ 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1,
+ 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61,
+ 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115,
+ 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135,
+ 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115,
+ 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175,
+ 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115,
+ 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08,
+ 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08,
+ 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08,
+ 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08,
+ 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08,
+ 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411,
+ 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1,
+ 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9,
+ 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231,
+ 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949,
+ 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040,
+ 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429,
+ 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339,
+ 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1,
+ 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351,
+ 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040,
+ 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231,
+ 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949,
+ 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351,
+ 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411,
+ 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231,
+ 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040,
+ 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249,
+ 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02,
+ 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018,
+ 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2,
+ 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72,
+ 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32,
+ 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2,
+ 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2,
+ 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040,
+ 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199,
+ 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359,
+ 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089,
+ 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1,
+ 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018,
+ 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018,
+ 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018,
+ 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018,
+ 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018,
+ 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040,
+ 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018,
+ 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018,
+ 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040,
+ 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040,
+ 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289,
+ 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349,
+ 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409,
+ 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9,
+ 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589,
+ 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649,
+ 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709,
+ 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9,
+ 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79,
+ 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39,
+ 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9,
+ 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39,
+ 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9,
+ 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79,
+ 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39,
+ 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9,
+ 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059,
+ 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9,
+ 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239,
+ 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9,
+ 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399,
+ 0x1e12: 0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459,
+ 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309,
+ 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559,
+ 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9,
+ 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679,
+ 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9,
+ 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d,
+ 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9,
+ 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959,
+ 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d,
+ 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d,
+ 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9,
+ 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99,
+ 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9,
+ 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9,
+ 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99,
+ 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39,
+ 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639,
+ 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9,
+ 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d,
+ 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9,
+ 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d,
+ 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd,
+ 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979,
+ 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19,
+ 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d,
+ 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d,
+ 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99,
+ 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39,
+ 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9,
+ 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39,
+ 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd,
+ 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19,
+ 0x1ee4: 0xee39, 0x1ee5: 0x925d, 0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9,
+ 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59,
+ 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd,
+ 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d,
+ 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d,
+ 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d,
+ 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879,
+ 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919,
+ 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd,
+ 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9,
+ 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99,
+ 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39,
+ 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9,
+ 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d,
+ 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19,
+ 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9,
+ 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59,
+ 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9,
+ 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d,
+ 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040,
+ 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040,
+ 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040,
+ 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040,
+ 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040,
+ 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040,
+}
+
+// idnaIndex: 35 blocks, 2240 entries, 4480 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2240]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20,
+ // Block 0x4, offset 0x100
+ 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15,
+ 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3,
+ 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca,
+ 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1,
+ 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6,
+ 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2,
+ 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9,
+ 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1,
+ 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff,
+ 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b,
+ 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51,
+ 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f,
+ 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d,
+ 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba,
+ 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f,
+ 0x408: 0x130, 0x409: 0xba, 0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138,
+ 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba,
+ 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a,
+ 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65,
+ 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168,
+ 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c,
+ 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173,
+ 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba,
+ // Block 0x1d, offset 0x740
+ 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba,
+ 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba,
+ 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba,
+ 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba,
+ 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a,
+ 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07,
+ 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17,
+ 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07,
+ 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b,
+ 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b,
+ 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b,
+ 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b,
+ 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b,
+ 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b,
+ 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b,
+ 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d,
+ 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba,
+ 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba,
+ 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 0xba,
+ 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba,
+ 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba,
+ 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba,
+ 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b,
+ 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b,
+ 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b,
+ 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b,
+ 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b,
+ 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b,
+ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b,
+ 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+}
+
+// idnaSparseOffset: 258 entries, 516 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a}
+
+// idnaSparseValues: 1869 entries, 7476 bytes
+var idnaSparseValues = [1869]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x6, offset 0x34
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x63
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0xc, offset 0x6b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x77
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0c08, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0x85},
+ {value: 0x0c08, lo: 0x86, hi: 0x87},
+ {value: 0x0a08, lo: 0x88, hi: 0x88},
+ {value: 0x0c08, lo: 0x89, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0x93},
+ {value: 0x0c08, lo: 0x94, hi: 0x94},
+ {value: 0x0a08, lo: 0x95, hi: 0x95},
+ {value: 0x0808, lo: 0x96, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf, offset 0x93
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0x10, offset 0x98
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x11, offset 0xa1
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbf
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xcc
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x15, offset 0xd8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x16, offset 0xe9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x17, offset 0xf3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x18, offset 0xfa
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x19, offset 0x107
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x1a, offset 0x118
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1b, offset 0x11f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1c, offset 0x12a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1e, offset 0x147
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1f, offset 0x151
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x20, offset 0x153
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x21, offset 0x158
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x22, offset 0x15b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x23, offset 0x15e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x24, offset 0x160
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x25, offset 0x16c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x26, offset 0x177
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x17f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x28, offset 0x185
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x29, offset 0x18b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2a, offset 0x190
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2b, offset 0x195
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2c, offset 0x198
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2d, offset 0x19c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2e, offset 0x1a2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2f, offset 0x1a7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x30, offset 0x1b3
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x31, offset 0x1bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x32, offset 0x1c3
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x33, offset 0x1d4
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x34, offset 0x1de
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x35, offset 0x1e1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x36, offset 0x1e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x37, offset 0x1ec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x38, offset 0x1f9
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x39, offset 0x201
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x3a, offset 0x205
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3b, offset 0x20c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3c, offset 0x214
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x224
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3e, offset 0x230
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3f, offset 0x232
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x41, offset 0x248
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x42, offset 0x254
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x43, offset 0x260
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x44, offset 0x268
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x45, offset 0x26d
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x46, offset 0x277
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x47, offset 0x288
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x49, offset 0x297
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x4a, offset 0x29b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4b, offset 0x2a4
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4c, offset 0x2ac
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4d, offset 0x2b2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4e, offset 0x2b7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x4f, offset 0x2ba
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x50, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x51, offset 0x2c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x52, offset 0x2c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x53, offset 0x2cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x54, offset 0x2cf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0018, lo: 0xbd, hi: 0xbf},
+ // Block 0x55, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xab},
+ {value: 0x0018, lo: 0xac, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2dc
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x57, offset 0x2e2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x58, offset 0x2ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x5a, offset 0x2fc
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x306
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5c, offset 0x30a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0xbf},
+ // Block 0x5d, offset 0x30d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5e, offset 0x313
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5f, offset 0x317
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x319
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x61, offset 0x31c
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x62, offset 0x31e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x63, offset 0x321
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x65, offset 0x32e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x66, offset 0x33d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x341
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x68, offset 0x346
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x69, offset 0x349
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x6a, offset 0x34d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6c, offset 0x357
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6f, offset 0x372
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x70, offset 0x378
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x71, offset 0x37c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x38b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x73, offset 0x390
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x75, offset 0x3a2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x76, offset 0x3ad
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x78, offset 0x3c6
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x79, offset 0x3cf
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x7a, offset 0x3df
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3ec
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3f6
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3fb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7e, offset 0x408
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7f, offset 0x40c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x80, offset 0x411
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x413
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x82, offset 0x417
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x83, offset 0x419
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x84, offset 0x41d
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x85, offset 0x426
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x86, offset 0x42c
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x87, offset 0x430
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x88, offset 0x440
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x89, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x8a, offset 0x44f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8b, offset 0x452
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8c, offset 0x458
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8d, offset 0x45f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8e, offset 0x464
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8f, offset 0x468
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x90, offset 0x46e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x91, offset 0x473
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x92, offset 0x47c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x93, offset 0x481
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x94, offset 0x487
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x95, offset 0x48e
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x96, offset 0x495
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x97, offset 0x49c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x98, offset 0x4a0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x4a5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4a8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x9b, offset 0x4ad
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9c, offset 0x4b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9d, offset 0x4bf
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9e, offset 0x4c4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4d3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0xa1, offset 0x4d8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa2, offset 0x4dc
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa3, offset 0x4ec
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa4, offset 0x4f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa5, offset 0x4f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa6, offset 0x4fb
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa7, offset 0x502
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa8, offset 0x504
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa9, offset 0x507
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xab, offset 0x50e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xac, offset 0x512
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xad, offset 0x518
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xae, offset 0x521
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0340, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb0, offset 0x534
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb1, offset 0x53d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb2, offset 0x545
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb3, offset 0x54c
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb5, offset 0x567
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb6, offset 0x574
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb7, offset 0x57d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb8, offset 0x581
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xba, offset 0x597
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbb, offset 0x5a2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbc, offset 0x5ab
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbd, offset 0x5b1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbe, offset 0x5b9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xbf, offset 0x5c2
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc0, offset 0x5cc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc1, offset 0x5cf
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc2, offset 0x5db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc3, offset 0x5de
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc4, offset 0x5e3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc5, offset 0x5e6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xc7, offset 0x5f9
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xc8, offset 0x605
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xc9, offset 0x608
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xca, offset 0x60d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xcb, offset 0x610
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xcc, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xcd, offset 0x616
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xce, offset 0x61d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xcf, offset 0x624
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd0, offset 0x628
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xd1, offset 0x633
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xd2, offset 0x636
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd3, offset 0x63c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xd4, offset 0x641
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0xd5, offset 0x645
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xd6, offset 0x648
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xd7, offset 0x64b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0xbf},
+ // Block 0xd8, offset 0x64e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xd9, offset 0x653
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xda, offset 0x65d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x660
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xdc, offset 0x664
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xdd, offset 0x673
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x67f
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xdf, offset 0x683
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xe0, offset 0x688
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x68d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xe2, offset 0x691
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xe3, offset 0x696
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x69f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xe5, offset 0x6aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xe6, offset 0x6b0
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe7, offset 0x6b8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe8, offset 0x6bc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xe9, offset 0x6c0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xea, offset 0x6c6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xec, offset 0x6d1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xed, offset 0x6d4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xee, offset 0x6e2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xef, offset 0x6e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf0, offset 0x6ec
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0xbf},
+ // Block 0xf1, offset 0x6ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf2, offset 0x6f3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0xf3, offset 0x6f9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0xf4, offset 0x6fe
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xf5, offset 0x708
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xf6, offset 0x70d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xf7, offset 0x710
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0xbf},
+ // Block 0xf8, offset 0x713
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf9, offset 0x716
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfa, offset 0x719
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x71d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xfc, offset 0x720
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0xfd, offset 0x730
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0xfe, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0xff, offset 0x746
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x100, offset 0x748
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x101, offset 0x74a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 41662 bytes (40KiB); checksum: 355A58A4
diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go
new file mode 100644
index 0000000..4212741
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/trie.go
@@ -0,0 +1,51 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// Sparse block handling code.
+
+type valueRange struct {
+ value uint16 // header: value:stride
+ lo, hi byte // header: lo:n
+}
+
+type sparseBlocks struct {
+ values []valueRange
+ offset []uint16
+}
+
+var idnaSparse = sparseBlocks{
+ values: idnaSparseValues[:],
+ offset: idnaSparseOffset[:],
+}
+
+// Don't use newIdnaTrie to avoid unconditional linking in of the table.
+var trie = &idnaTrie{}
+
+// lookup determines the type of block n and looks up the value for b.
+// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
+// is a list of ranges with an accompanying value. Given a matching range r,
+// the value for b is given by r.value + (b - r.lo) * stride.
+func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
+ offset := t.offset[n]
+ header := t.values[offset]
+ lo := offset + 1
+ hi := lo + uint16(header.lo)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ r := t.values[m]
+ if r.lo <= b && b <= r.hi {
+ return r.value + uint16(b-r.lo)*header.value
+ }
+ if b < r.lo {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go
new file mode 100644
index 0000000..8a75b96
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/trie12.0.0.go
@@ -0,0 +1,30 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+
+package idna
+
+// appendMapping appends the mapping for the respective rune. isMapped must be
+// true. A mapping is a categorization of a rune as defined in UTS #46.
+func (c info) appendMapping(b []byte, s string) []byte {
+ index := int(c >> indexShift)
+ if c&xorBit == 0 {
+ s := mappings[index:]
+ return append(b, s[1:s[0]+1]...)
+ }
+ b = append(b, s...)
+ if c&inlineXOR == inlineXOR {
+ // TODO: support and handle two-byte inline masks
+ b[len(b)-1] ^= byte(index)
+ } else {
+ for p := len(b) - int(xorData[index]); p < len(b); p++ {
+ index++
+ b[p] ^= xorData[index]
+ }
+ }
+ return b
+}
diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go
new file mode 100644
index 0000000..fa45bb9
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/trie13.0.0.go
@@ -0,0 +1,30 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+
+package idna
+
+// appendMapping appends the mapping for the respective rune. isMapped must be
+// true. A mapping is a categorization of a rune as defined in UTS #46.
+func (c info) appendMapping(b []byte, s string) []byte {
+ index := int(c >> indexShift)
+ if c&xorBit == 0 {
+ p := index
+ return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...)
+ }
+ b = append(b, s...)
+ if c&inlineXOR == inlineXOR {
+ // TODO: support and handle two-byte inline masks
+ b[len(b)-1] ^= byte(index)
+ } else {
+ for p := len(b) - int(xorData[index]); p < len(b); p++ {
+ index++
+ b[p] ^= xorData[index]
+ }
+ }
+ return b
+}
diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go
new file mode 100644
index 0000000..9c070a4
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/trieval.go
@@ -0,0 +1,119 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package idna
+
+// This file contains definitions for interpreting the trie value of the idna
+// trie generated by "go run gen*.go". It is shared by both the generator
+// program and the resultant package. Sharing is achieved by the generator
+// copying gen_trieval.go to trieval.go and changing what's above this comment.
+
+// info holds information from the IDNA mapping table for a single rune. It is
+// the value returned by a trie lookup. In most cases, all information fits in
+// a 16-bit value. For mappings, this value may contain an index into a slice
+// with the mapped string. Such mappings can consist of the actual mapped value
+// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
+// input rune. This technique is used by the cases packages and reduces the
+// table size significantly.
+//
+// The per-rune values have the following format:
+//
+// if mapped {
+// if inlinedXOR {
+// 15..13 inline XOR marker
+// 12..11 unused
+// 10..3 inline XOR mask
+// } else {
+// 15..3 index into xor or mapping table
+// }
+// } else {
+// 15..14 unused
+// 13 mayNeedNorm
+// 12..11 attributes
+// 10..8 joining type
+// 7..3 category type
+// }
+// 2 use xor pattern
+// 1..0 mapped category
+//
+// See the definitions below for a more detailed description of the various
+// bits.
+type info uint16
+
+const (
+ catSmallMask = 0x3
+ catBigMask = 0xF8
+ indexShift = 3
+ xorBit = 0x4 // interpret the index as an xor pattern
+ inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
+
+ joinShift = 8
+ joinMask = 0x07
+
+ // Attributes
+ attributesMask = 0x1800
+ viramaModifier = 0x1800
+ modifier = 0x1000
+ rtl = 0x0800
+
+ mayNeedNorm = 0x2000
+)
+
+// A category corresponds to a category defined in the IDNA mapping table.
+type category uint16
+
+const (
+ unknown category = 0 // not currently defined in unicode.
+ mapped category = 1
+ disallowedSTD3Mapped category = 2
+ deviation category = 3
+)
+
+const (
+ valid category = 0x08
+ validNV8 category = 0x18
+ validXV8 category = 0x28
+ disallowed category = 0x40
+ disallowedSTD3Valid category = 0x80
+ ignored category = 0xC0
+)
+
+// join types and additional rune information
+const (
+ joiningL = (iota + 1)
+ joiningD
+ joiningT
+ joiningR
+
+ //the following types are derived during processing
+ joinZWJ
+ joinZWNJ
+ joinVirama
+ numJoinTypes
+)
+
+func (c info) isMapped() bool {
+ return c&0x3 != 0
+}
+
+func (c info) category() category {
+ small := c & catSmallMask
+ if small != 0 {
+ return category(small)
+ }
+ return category(c & catBigMask)
+}
+
+func (c info) joinType() info {
+ if c.isMapped() {
+ return 0
+ }
+ return (c >> joinShift) & joinMask
+}
+
+func (c info) isModifier() bool {
+ return c&(modifier|catSmallMask) == modifier
+}
+
+func (c info) isViramaModifier() bool {
+ return c&(attributesMask|catSmallMask) == viramaModifier
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
new file mode 100644
index 0000000..ed14da5
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if lower(s[i]) != lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+ if !isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
new file mode 100644
index 0000000..92483d8
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -0,0 +1,115 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+ "net/textproto"
+ "sync"
+)
+
+var (
+ commonBuildOnce sync.Once
+ commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+ commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
+)
+
+func buildCommonHeaderMapsOnce() {
+ commonBuildOnce.Do(buildCommonHeaderMaps)
+}
+
+func buildCommonHeaderMaps() {
+ common := []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-credentials",
+ "access-control-allow-headers",
+ "access-control-allow-methods",
+ "access-control-allow-origin",
+ "access-control-expose-headers",
+ "access-control-max-age",
+ "access-control-request-headers",
+ "access-control-request-method",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "origin",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ "x-forwarded-for",
+ "x-forwarded-proto",
+ }
+ commonLowerHeader = make(map[string]string, len(common))
+ commonCanonHeader = make(map[string]string, len(common))
+ for _, v := range common {
+ chk := textproto.CanonicalMIMEHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
+func LowerHeader(v string) (lower string, ascii bool) {
+ buildCommonHeaderMapsOnce()
+ if s, ok := commonLowerHeader[v]; ok {
+ return s, true
+ }
+ return asciiToLower(v)
+}
+
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
+ buildCommonHeaderMapsOnce()
+ if s, ok := commonCanonHeader[v]; ok {
+ return s
+ }
+ return textproto.CanonicalMIMEHeaderKey(v)
+}
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+ buildCommonHeaderMapsOnce()
+ s, ok := commonCanonHeader[v]
+ return s, ok
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 0000000..4b70553
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,467 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+)
+
+var (
+ ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// Request is a subset of http.Request.
+// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
+// without creating a dependency cycle.
+type Request struct {
+ URL *url.URL
+ Method string
+ Host string
+ Header map[string][]string
+ Trailer map[string][]string
+ ActualContentLength int64 // 0 means 0, -1 means unknown
+}
+
+// EncodeHeadersParam contains the parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+ Request Request
+
+ // AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+ // added to the request.
+ AddGzipHeader bool
+
+ // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+ PeerMaxHeaderListSize uint64
+
+ // DefaultUserAgent is the User-Agent header to send when the request
+ // neither contains a User-Agent nor disables it.
+ DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+ HasBody bool
+ HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, canonicalized header name.
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+ req := param.Request
+
+ // Check for invalid connection-level headers.
+ if err := checkConnHeaders(req.Header); err != nil {
+ return res, err
+ }
+
+ if req.URL == nil {
+ return res, errors.New("Request.URL is nil")
+ }
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return res, err
+ }
+ if !httpguts.ValidHostHeader(host) {
+ return res, errors.New("invalid Host header")
+ }
+
+ // isNormalConnect is true if this is a non-extended CONNECT request.
+ isNormalConnect := false
+ var protocol string
+ if vv := req.Header[":protocol"]; len(vv) > 0 {
+ protocol = vv[0]
+ }
+ if req.Method == "CONNECT" && protocol == "" {
+ isNormalConnect = true
+ } else if protocol != "" && req.Method != "CONNECT" {
+ return res, errors.New("invalid :protocol header in non-CONNECT request")
+ }
+
+ // Validate the path, except for non-extended CONNECT requests which have no path.
+ var path string
+ if !isNormalConnect {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return res, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers+trailers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ if err := validateHeaders(req.Header); err != "" {
+ return res, fmt.Errorf("invalid HTTP header %s", err)
+ }
+ if err := validateHeaders(req.Trailer); err != "" {
+ return res, fmt.Errorf("invalid HTTP trailer %s", err)
+ }
+
+ trailers, err := commaSeparatedTrailers(req.Trailer)
+ if err != nil {
+ return res, err
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production, see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = "GET"
+ }
+ f(":method", m)
+ if !isNormalConnect {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if protocol != "" {
+ f(":protocol", protocol)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if asciiEqualFold(k, "connection") ||
+ asciiEqualFold(k, "proxy-connection") ||
+ asciiEqualFold(k, "transfer-encoding") ||
+ asciiEqualFold(k, "upgrade") ||
+ asciiEqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if asciiEqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ } else if asciiEqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
+ } else if k == ":protocol" {
+ // :protocol pseudo-header was already sent above.
+ continue
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
+ f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
+ }
+ if param.AddGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", param.DefaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ if param.PeerMaxHeaderListSize > 0 {
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > param.PeerMaxHeaderListSize {
+ return res, ErrRequestHeaderListSize
+ }
+ }
+
+ trace := httptrace.ContextClientTrace(ctx)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name, ascii := LowerHeader(name)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ return
+ }
+
+ headerf(name, value)
+
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(name, []string{value})
+ }
+ })
+
+ res.HasBody = req.ActualContentLength != 0
+ res.HasTrailers = trailers != ""
+ return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !disableCompression &&
+ len(header["Accept-Encoding"]) == 0 &&
+ len(header["Range"]) == 0 &&
+ method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ return true
+ }
+ return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
+func checkConnHeaders(h map[string][]string) error {
+ if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Upgrade request header: %q", vv)
+ }
+ if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
+ keys := make([]string, 0, len(trailer))
+ for k := range trailer {
+ k = CanonicalHeader(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", fmt.Errorf("invalid Trailer key %q", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs map[string][]string) string {
+ for k, vv := range hdrs {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+ return fmt.Sprintf("name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error,
+ // because it may be sensitive.
+ return fmt.Sprintf("value for header %q", k)
+ }
+ }
+ }
+ return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
+// ServerRequestParam contains the parameters to NewServerRequest.
+type ServerRequestParam struct {
+ Method string
+ Scheme, Authority, Path string
+ Protocol string
+ Header map[string][]string
+}
+
+// ServerRequestResult is the result of NewServerRequest.
+type ServerRequestResult struct {
+ // Various http.Request fields.
+ URL *url.URL
+ RequestURI string
+ Trailer map[string][]string
+
+ NeedsContinue bool // client provided an "Expect: 100-continue" header
+
+ // If the request should be rejected, this is a short string suitable for passing
+ // to the http2 package's CountError function.
+ // It might be a bit odd to return errors this way rather than returning an error,
+ // but this ensures we don't forget to include a CountError reason.
+ InvalidReason string
+}
+
+func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
+ needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
+ if needsContinue {
+ delete(rp.Header, "Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
+ rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
+ }
+
+ // Setup Trailers
+ var trailer map[string][]string
+ for _, v := range rp.Header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(map[string][]string)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.Header, "Trailer")
+
+ // "':authority' MUST NOT include the deprecated userinfo subcomponent
+ // for "http" or "https" schemed URIs."
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
+ if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
+ return ServerRequestResult{
+ InvalidReason: "userinfo_in_authority",
+ }
+ }
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.Method == "CONNECT" && rp.Protocol == "" {
+ url_ = &url.URL{Host: rp.Authority}
+ requestURI = rp.Authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.Path)
+ if err != nil {
+ return ServerRequestResult{
+ InvalidReason: "bad_path",
+ }
+ }
+ requestURI = rp.Path
+ }
+
+ return ServerRequestResult{
+ URL: url_,
+ NeedsContinue: needsContinue,
+ RequestURI: requestURI,
+ Trailer: trailer,
+ }
+}
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 0000000..dc5225b
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// Keeps a sequence of levels. Each level is responsible for storing data at
+// a given resolution. For example, the first level stores data at a one
+// minute resolution while the second level stores data at a one hour
+// resolution.
+
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series according to the supplied criteria.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+ if level < 0 || level >= len(ts.levels) {
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index -= 1
+ }
+ return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+ for _, l := range ts.levels {
+ for i := 0; i < ts.numBuckets; i++ {
+ l.buckets[i].Multiply(factor)
+ }
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns the specified number of values in slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract returns a slice of specified number of observations from a given
+// level over a given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := int(dstStart.Sub(srcStart) / srcInterval)
+ srcIndex += advance
+ srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
+ }
+
+ // The i'th value is computed as shown below.
+ // interval = (finish - start)/num
+ // i'th value = sum of observations in range
+ // [ start + i * interval,
+ // start + (i + 1) * interval )
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 0000000..c646a69
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,532 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Events handler.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+ var ok bool
+ data.Family, data.Bucket, ok = parseEventsArgs(req)
+ if !ok {
+ // No-op
+ } else {
+ data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+ }
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl().Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the time at which the event occurred.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+ // el.discarded starts at two to count for the event it
+ // is replacing, plus the next one that we are about to
+ // drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
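+
+// Note (added for clarity): newEventLog and freeEventLog form a simple
+// freelist. Up to 1000 finished eventLogs are retained for reuse; once the
+// channel is full, freeEventLog drops the value and lets the garbage
+// collector reclaim it, so neither call ever blocks.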
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+ eventsTmplOnce.Do(func() {
+ eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+ }).Parse(eventsHTML))
+ })
+ return eventsTmplCache
+}
+
+const eventsHTML = `
+
+
+ events
+
+
+
+
+/debug/events
+
+
+
+{{if $.EventLogs}}
+
+Family: {{$.Family}}
+
+{{if $.Expanded}}{{end}}
+[Summary]{{if $.Expanded}} {{end}}
+
+{{if not $.Expanded}}{{end}}
+[Expanded]{{if not $.Expanded}} {{end}}
+
+
+ When Elapsed
+ {{range $el := $.EventLogs}}
+
+ {{$el.When}}
+ {{$el.ElapsedTime}}
+ {{$el.Title}}
+
+ {{if $.Expanded}}
+
+
+
+ {{$el.Stack|trimSpace}}
+
+ {{range $el.Events}}
+
+ {{.WhenString}}
+ {{elapsed .Elapsed}}
+ .{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+
+ {{end}}
+ {{end}}
+ {{end}}
+
+{{end}}
+
+
+`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000..d6c7110
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+ "sync"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+ sum int64 // running total of measurements
+ sumOfSquares float64 // running sum of squared measurements
+ buckets []int64 // bucketed values for histogram
+ value int // holds a single value as an optimization
+ valueCount int64 // number of values recorded for single value
+}
+
+// addMeasurement records a single observed value in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+ n += 1
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
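+
+// Example (added for illustration): addMeasurement(5) computes log2(5) == 3,
+// so getBucket returns index 2; per bucketBoundary below, bucket 2 covers the
+// half-open range [4, 8). Values 0 and 1 both fall into bucket 0, which
+// covers [0, 2).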
+
+// Total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+ total += int64(val)
+ }
+ return
+}
+
+// Average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// Variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// StandardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// PercentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// Median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl().Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+ distTmplOnce.Do(func() {
+ // Input: data
+ distTmplCache = template.Must(template.New("distTmpl").Parse(`
+
+
+ Count: {{.Count}}
+ Mean: {{printf "%.0f" .Mean}}
+ StdDev: {{printf "%.0f" .StandardDeviation}}
+ Median: {{.Median}}
+
+
+
+
+{{range $b := .Buckets}}
+{{if $b}}
+
+ [
+ {{.Lower}},
+ {{.Upper}})
+ {{.N}}
+ {{printf "%#.3f" .Pct}}%
+ {{printf "%#.3f" .CumulativePct}}%
+
+
+{{end}}
+{{end}}
+
+`))
+ })
+ return distTmplCache
+}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000..eae2a99
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1130 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// HTTP ServeMux paths.
+const (
+ debugRequestsPath = "/debug/requests"
+ debugEventsPath = "/debug/events"
+)
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customize its authorization requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ // RemoteAddr is commonly in the form "IP" or "IP:port".
+ // If it is in the form "IP:port", split off the port.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+ switch host {
+ case "localhost", "127.0.0.1", "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
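+
+// Example override (added for illustration; the policy shown is hypothetical):
+// allow any client to view the pages but only reveal sensitive events to
+// loopback clients. A program would assign a function of the same signature,
+// e.g. in its own init code:
+//
+//   trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//       host, _, err := net.SplitHostPort(req.RemoteAddr)
+//       if err != nil {
+//           host = req.RemoteAddr
+//       }
+//       return true, host == "127.0.0.1" || host == "::1" || host == "localhost"
+//   }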
+
+func init() {
+ _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}})
+ if pat == debugRequestsPath {
+ panic("/debug/requests is already registered. You may have two independent copies of " +
+ "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " +
+ "involve a vendored copy of golang.org/x/net/trace.")
+ }
+
+ // TODO(jbd): Serve Traces from /debug/traces in the future?
+ // There is no requirement for a request to be present to have traces.
+ http.HandleFunc(debugRequestsPath, Traces)
+ http.HandleFunc(debugEventsPath, Events)
+}
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
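+
+// Usage sketch (added for illustration): a handler can stash its Trace in the
+// request context so downstream code can annotate it without explicit
+// plumbing; doWork is a placeholder for the caller's own helper:
+//
+//   tr := trace.New("mypkg.Foo", req.URL.Path)
+//   defer tr.Finish()
+//   ctx := trace.NewContext(req.Context(), tr)
+//   doWork(ctx) // later: if tr, ok := trace.FromContext(ctx); ok { tr.LazyPrintf("...") }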
+
+// Traces responds with traces from the program.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/requests.
+//
+// It performs authorization by running AuthRequest.
+func Traces(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ Render(w, req, sensitive)
+}
+
+// Events responds with a page of events collected by EventLogs.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/events.
+//
+// It performs authorization by running AuthRequest.
+func Events(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ RenderEvents(w, req, sensitive)
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Traces handler.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.maxEvents = maxEventsPerTrace
+ tr.events = tr.eventsBuf[:0]
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+ elapsed := time.Since(tr.Start)
+ tr.mu.Lock()
+ tr.Elapsed = elapsed
+ tr.mu.Unlock()
+
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ tr.mu.RLock() // protects tr fields in Cond.match calls
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ tr.mu.RUnlock()
+
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
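+
+// Note (added for clarity): with tracesPerBucket == 10, the eleventh Add
+// overwrites the oldest slot: the evicted trace is unref'd, start advances
+// (wrapping back to 0 at the end of buf), and length stays pinned at 10.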
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ Sensitive bool // whether this event contains sensitive information
+ What interface{} // string or fmt.Stringer
+}
+
+// WhenString returns a string representation of the time at which the event occurred.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Start time of this trace.
+ Start time.Time
+
+ mu sync.RWMutex
+ events []event // Append-only sequence of events (modulo discards).
+ maxEvents int
+ recycler func(interface{})
+ IsError bool // Whether this trace resulted in an error.
+ Elapsed time.Duration // Elapsed time for this trace, zero while active.
+ traceID uint64 // Trace information if non-zero.
+ spanID uint64
+
+ refs int32 // how many buckets this is in
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+
+ eventsBuf [4]event // preallocated buffer in case we only log a few events
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+
+ tr.mu.Lock()
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.maxEvents = 0
+ tr.events = nil
+ tr.recycler = nil
+ tr.mu.Unlock()
+
+ tr.refs = 0
+ tr.disc = 0
+ tr.finishStack = nil
+ for i := range tr.eventsBuf {
+ tr.eventsBuf[i] = event{}
+ }
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < tr.maxEvents {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((tr.maxEvents - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to count for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[tr.maxEvents-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() {
+ tr.mu.Lock()
+ tr.IsError = true
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.mu.Lock()
+ tr.recycler = f
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.mu.Lock()
+ tr.traceID, tr.spanID = traceID, spanID
+ tr.mu.Unlock()
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ tr.mu.Lock()
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.maxEvents = m
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ tr.mu.RLock()
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+ tr.mu.RUnlock()
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ tr.mu.RLock()
+ t := tr.Elapsed
+ tr.mu.RUnlock()
+
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
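+
+// Example (added for illustration): for d = 1500 microseconds, Sprintf yields
+// "0.001500"; the leading zero and the zeros just after the decimal point are
+// blanked, so the value renders roughly as " .  1500" and sub-second
+// durations stay column-aligned in the page.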
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+ pageTmplOnce.Do(func() {
+ pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+ }).Parse(pageHTML))
+ })
+ return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+
+
+ /debug/requests
+
+
+
+
+/debug/requests
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+
+Family: {{$.Family}}
+
+{{if or $.Expanded $.Traced}}
+ [Normal/Summary]
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ [Normal/Expanded]
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ [Traced/Summary]
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ [Traced/Expanded]
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+Showing {{len $.Traces}} of {{$.Total}} traces.
+{{end}}
+
+
+
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+
+ When Elapsed (s)
+ {{range $tr := $.Traces}}
+
+ {{$tr.When}}
+ {{$tr.ElapsedTime}}
+ {{$tr.Title}}
+ {{/* TODO: include traceID/spanID */}}
+
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+
+ {{.WhenString}}
+ {{elapsed .Elapsed}}
+ {{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted] {{end}}
+
+ {{end}}
+ {{end}}
+ {{end}}
+
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+
+
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/sync/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
new file mode 100644
index 0000000..b618162
--- /dev/null
+++ b/vendor/golang.org/x/sync/semaphore/semaphore.go
@@ -0,0 +1,160 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semaphore provides a weighted semaphore implementation.
+package semaphore // import "golang.org/x/sync/semaphore"
+
+import (
+ "container/list"
+ "context"
+ "sync"
+)
+
+type waiter struct {
+ n int64
+ ready chan<- struct{} // Closed when semaphore acquired.
+}
+
+// NewWeighted creates a new weighted semaphore with the given
+// maximum combined weight for concurrent access.
+func NewWeighted(n int64) *Weighted {
+ w := &Weighted{size: n}
+ return w
+}
+
+// Weighted provides a way to bound concurrent access to a resource.
+// The callers can request access with a given weight.
+type Weighted struct {
+ size int64
+ cur int64
+ mu sync.Mutex
+ waiters list.List
+}
+
+// Acquire acquires the semaphore with a weight of n, blocking until resources
+// are available or ctx is done. On success, returns nil. On failure, returns
+// ctx.Err() and leaves the semaphore unchanged.
+func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+ done := ctx.Done()
+
+ s.mu.Lock()
+ select {
+ case <-done:
+ // ctx becoming done has "happened before" acquiring the semaphore,
+ // whether it became done before the call began or while we were
+ // waiting for the mutex. We prefer to fail even if we could acquire
+ // the mutex without blocking.
+ s.mu.Unlock()
+ return ctx.Err()
+ default:
+ }
+ if s.size-s.cur >= n && s.waiters.Len() == 0 {
+ // Since we hold s.mu and haven't synchronized since checking done, if
+ // ctx becomes done before we return here, it becoming done must have
+ // "happened concurrently" with this call - it cannot "happen before"
+ // we return in this branch. So, we're ok to always acquire here.
+ s.cur += n
+ s.mu.Unlock()
+ return nil
+ }
+
+ if n > s.size {
+ // Don't make other Acquire calls block on one that's doomed to fail.
+ s.mu.Unlock()
+ <-done
+ return ctx.Err()
+ }
+
+ ready := make(chan struct{})
+ w := waiter{n: n, ready: ready}
+ elem := s.waiters.PushBack(w)
+ s.mu.Unlock()
+
+ select {
+ case <-done:
+ s.mu.Lock()
+ select {
+ case <-ready:
+ // Acquired the semaphore after we were canceled.
+ // Pretend we didn't and put the tokens back.
+ s.cur -= n
+ s.notifyWaiters()
+ default:
+ isFront := s.waiters.Front() == elem
+ s.waiters.Remove(elem)
+ // If we're at the front and there are extra tokens left, notify other waiters.
+ if isFront && s.size > s.cur {
+ s.notifyWaiters()
+ }
+ }
+ s.mu.Unlock()
+ return ctx.Err()
+
+ case <-ready:
+ // Acquired the semaphore. Check that ctx isn't already done.
+ // We check the done channel instead of calling ctx.Err because we
+ // already have the channel, and ctx.Err is O(n) with the nesting
+ // depth of ctx.
+ select {
+ case <-done:
+ s.Release(n)
+ return ctx.Err()
+ default:
+ }
+ return nil
+ }
+}
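+
+// Usage sketch (added for illustration; not part of the upstream file):
+// bounding a worker pool to at most 4 concurrent tasks. tasks, Task, process,
+// and ctx are placeholder names for the caller's own code:
+//
+//   sem := semaphore.NewWeighted(4)
+//   for _, t := range tasks {
+//       if err := sem.Acquire(ctx, 1); err != nil {
+//           break // ctx was canceled or its deadline passed
+//       }
+//       go func(t Task) {
+//           defer sem.Release(1)
+//           process(t)
+//       }(t)
+//   }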
+
+// TryAcquire acquires the semaphore with a weight of n without blocking.
+// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
+func (s *Weighted) TryAcquire(n int64) bool {
+ s.mu.Lock()
+ success := s.size-s.cur >= n && s.waiters.Len() == 0
+ if success {
+ s.cur += n
+ }
+ s.mu.Unlock()
+ return success
+}
+
+// Release releases the semaphore with a weight of n.
+func (s *Weighted) Release(n int64) {
+ s.mu.Lock()
+ s.cur -= n
+ if s.cur < 0 {
+ s.mu.Unlock()
+ panic("semaphore: released more than held")
+ }
+ s.notifyWaiters()
+ s.mu.Unlock()
+}
+
+func (s *Weighted) notifyWaiters() {
+ for {
+ next := s.waiters.Front()
+ if next == nil {
+ break // No more waiters blocked.
+ }
+
+ w := next.Value.(waiter)
+ if s.size-s.cur < w.n {
+ // Not enough tokens for the next waiter. We could keep going (to try to
+ // find a waiter with a smaller request), but under load that could cause
+ // starvation for large requests; instead, we leave all remaining waiters
+ // blocked.
+ //
+ // Consider a semaphore used as a read-write lock, with N tokens, N
+ // readers, and one writer. Each reader can Acquire(1) to obtain a read
+ // lock. The writer can Acquire(N) to obtain a write lock, excluding all
+ // of the readers. If we allow the readers to jump ahead in the queue,
+ // the writer will starve — there is always one token available for every
+ // reader.
+ break
+ }
+
+ s.cur += w.n
+ s.waiters.Remove(next)
+ close(w.ready)
+ }
+}
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
new file mode 100644
index 0000000..2a7cf70
--- /dev/null
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/sys/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
new file mode 100644
index 0000000..e3e0fc6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -0,0 +1,2 @@
+_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
new file mode 100644
index 0000000..6e08a76
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -0,0 +1,184 @@
+# Building `sys/unix`
+
+The sys/unix package provides access to the raw system call interface of the
+underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
+
+Porting Go to a new architecture/OS combination or adding syscalls, types, or
+constants to an existing architecture/OS pair requires some manual effort;
+however, there are tools that automate much of the process.
+
+## Build Systems
+
+There are currently two ways we generate the necessary files. We are currently
+migrating the build system to use containers so the builds are reproducible.
+This is being done on an OS-by-OS basis. Please update this documentation as
+components of the build system change.
+
+### Old Build System (currently for `GOOS != "linux"`)
+
+The old build system generates the Go files based on the C header files
+present on your system. This means that files
+for a given GOOS/GOARCH pair must be generated on a system with that OS and
+architecture. This also means that the generated code can differ from system
+to system, based on differences in the header files.
+
+To avoid this, if you are using the old build system, only generate the Go
+files on an installation with unmodified header files. It is also important to
+keep track of which version of the OS the files were generated from (ex.
+Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
+and have each OS upgrade correspond to a single change.
+
+To build the files for your current OS and architecture, make sure GOOS and
+GOARCH are set correctly and run `mkall.sh`. This will generate the files for
+your specific system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go
+
+### New Build System (currently for `GOOS == "linux"`)
+
+The new build system uses a Docker container to generate the go files directly
+from source checkouts of the kernel and various system libraries. This means
+that on any platform that supports Docker, all the files using the new build
+system can be generated at once, and generated files will not change based on
+what the person running the scripts has installed on their computer.
+
+The OS specific files for the new build system are located in the `${GOOS}`
+directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
+the kernel or system library updates, modify the Dockerfile at
+`${GOOS}/Dockerfile` to checkout the new release of the source.
+
+To build all the files under the new build system, you must be on an amd64/Linux
+system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
+then generate all of the files for all of the GOOS/GOARCH pairs in the new build
+system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go, docker
+
+## Component files
+
+This section describes the various files used in the code generation process.
+It also contains instructions on how to modify these files to add a new
+architecture/OS or to add additional syscalls, types, or constants. Note that
+if you are using the new build system, the scripts/programs cannot be called normally.
+They must be called from within the docker container.
+
+### asm files
+
+The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
+call dispatch. There are three entry points:
+```
+ func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+ func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+```
+The first and second are the standard ones; they differ only in how many
+arguments can be passed to the kernel. The third is for low-level use by the
+ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
+let it know that a system call is running.
+
+When porting Go to a new architecture/OS, this file must be implemented for
+each GOOS/GOARCH pair.
+
+### mksysnum
+
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
+syscall number declarations and parses them to produce the corresponding list of
+Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
+constants.
+
+Adding new syscall numbers is mostly done by running the build on a sufficiently
+new installation of the target OS (or updating the source checkouts for the
+new build system). However, depending on the OS, you may need to update the
+parsing in mksysnum.
+
+### mksyscall.go
+
+The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
+hand-written Go files which implement system calls (for unix, the specific OS,
+or the specific OS/Architecture pair respectively) that need special handling
+and list `//sys` comments giving prototypes for ones that can be generated.
+
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
+them into syscalls. This requires the name of the prototype in the comment to
+match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
+prototype can be exported (capitalized) or not.
+
+Adding a new syscall often just requires adding a new `//sys` function prototype
+with the desired arguments and a capitalized name so it is exported. However, if
+you want the interface to the syscall to be different, often one will make an
+unexported `//sys` prototype, and then write a custom wrapper in
+`syscall_${GOOS}.go`.
+
+### types files
+
+For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
+`types_${GOOS}.go` on the old system). This file includes standard C headers and
+creates Go type aliases to the corresponding C types. The file is then fed
+through godef to get the Go compatible definitions. Finally, the generated code
+is fed through mkpost.go to format the code correctly and remove any hidden or
+private identifiers. This cleaned-up code is written to
+`ztypes_${GOOS}_${GOARCH}.go`.
+
+The hardest part about preparing this file is figuring out which headers to
+include and which symbols need to be `#define`d to get the actual data
+structures that pass through to the kernel system calls. Some C libraries
+preset alternate versions for binary compatibility and translate them on the
+way in and out of system calls, but there is almost always a `#define` that can
+get the real ones.
+See `types_darwin.go` and `linux/types.go` for examples.
+
+To add a new type, add in the necessary include statement at the top of the
+file (if it is not already there) and add in a type alias line. Note that if
+your type is significantly different on different architectures, you may need
+some `#if/#elif` macros in your include statements.
+
+### mkerrors.sh
+
+This script is used to generate the system's various constants. This doesn't
+just include the error numbers and error strings, but also the signal numbers
+and a wide variety of miscellaneous constants. The constants come from the list
+of include files in the `includes_${uname}` variable. A regex then picks out
+the desired `#define` statements, and generates the corresponding Go constants.
+The error numbers and strings are generated from `#include <errno.h>`, and the
+signal numbers and strings are generated from `#include <signal.h>`. All of
+these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
+`_errors.c`, which prints out all the constants.
+
+To add a constant, add the header that includes it to the appropriate variable.
+Then, edit the regex (if necessary) to match the desired constant. Avoid making
+the regex too broad to avoid matching unintended constants.
+
+### internal/mkmerge
+
+This program is used to extract duplicate const, func, and type declarations
+from the generated architecture-specific files listed below, and merge these
+into a common file for each OS.
+
+The merge is performed in the following steps:
+1. Construct the set of common code that is identical in all architecture-specific files.
+2. Write this common code to the merged file.
+3. Remove the common code from all architecture-specific files.
+
+
+## Generated files
+
+### `zerrors_${GOOS}_${GOARCH}.go`
+
+A file containing all of the system's generated error numbers, error strings,
+signal numbers, and constants. Generated by `mkerrors.sh` (see above).
+
+### `zsyscall_${GOOS}_${GOARCH}.go`
+
+A file containing all the generated syscalls for a specific GOOS and GOARCH.
+Generated by `mksyscall.go` (see above).
+
+### `zsysnum_${GOOS}_${GOARCH}.go`
+
+A list of numeric constants for all the syscall numbers of the specific GOOS
+and GOARCH. Generated by mksysnum (see above).
+
+### `ztypes_${GOOS}_${GOARCH}.go`
+
+A file containing Go types for passing into (or returning from) syscalls.
+Generated by godefs and the types file (see above).
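
The mksyscall.go section of the vendored README describes the `//sys` convention: an unexported prototype comment is turned into a generated stub, and a hand-written exported wrapper adjusts the public interface. A hedged sketch of that shape; the syscall name, flag mask, and wrapper below are hypothetical and exist only to illustrate the convention:

```go
package unix

// Hypothetical illustration of the //sys convention described above; the
// frobnicate syscall, frobFlagsMask, and Frobnicate wrapper are invented for
// this sketch and are not part of the real package.

const frobFlagsMask = 0x3 // hypothetical set of supported flags

//sys	frobnicate(fd int, flags int) (err error)

// Frobnicate is the exported, hand-written wrapper (it would live in
// syscall_${GOOS}.go): it validates arguments, then calls the generated stub.
func Frobnicate(fd int, flags int) error {
	if flags&^frobFlagsMask != 0 {
		return EINVAL
	}
	return frobnicate(fd, flags)
}
```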
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 0000000..6e5c81a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += bits.OnesCount64(uint64(b))
+ }
+ return c
+}
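
affinity_linux.go above wraps `sched_getaffinity`/`sched_setaffinity` behind a `CPUSet` bit mask. A small, Linux-only sketch of how the vendored API is typically used: read the calling thread's mask, then pin the thread to CPU 0 (the choice of CPU 0 is just an example):

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet

	// pid 0 means the calling thread.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		fmt.Println("getaffinity:", err)
		return
	}
	fmt.Println("thread may run on", set.Count(), "CPUs")

	// Restrict the calling thread to CPU 0.
	set.Zero()
	set.Set(0)
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		fmt.Println("setaffinity:", err)
	}
}
```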
diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go
new file mode 100644
index 0000000..b0e4198
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/aliases.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+package unix
+
+import "syscall"
+
+type Signal = syscall.Signal
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
new file mode 100644
index 0000000..269e173
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
new file mode 100644
index 0000000..a4fcef0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+// System call support for 386 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
new file mode 100644
index 0000000..1e63615
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+// System call support for AMD64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
new file mode 100644
index 0000000..6496c31
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+// System call support for ARM BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
new file mode 100644
index 0000000..4fd1f54
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+// System call support for ARM64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
new file mode 100644
index 0000000..42f7eb9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+//
+// System call support for ppc64, BSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
new file mode 100644
index 0000000..f890266
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+
+#include "textflag.h"
+
+// System call support for RISCV64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 0000000..3b47348
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,65 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 0000000..67e29f3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 0000000..d6ae269
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,56 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 0000000..01e5e25
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && arm64 && gc
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
new file mode 100644
index 0000000..2abf12f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
@@ -0,0 +1,51 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && loong64 && gc
+
+#include "textflag.h"
+
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R11 // syscall entry
+ SYSCALL
+ MOVV R4, r1+32(FP)
+ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R11 // syscall entry
+ SYSCALL
+ MOVV R4, r1+32(FP)
+ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 0000000..f84bae7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,54 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le) && gc
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 0000000..f08f628
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,52 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips || mipsle) && gc
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 0000000..bdfc024
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,42 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le) && gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 0000000..2e8c996
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,47 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build riscv64 && gc
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
new file mode 100644
index 0000000..2c394b1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -0,0 +1,54 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && s390x && gc
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
new file mode 100644
index 0000000..fab586a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System call support for mips64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 0000000..f949ec5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
new file mode 100644
index 0000000..813dfad
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
@@ -0,0 +1,382 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x && gc
+
+#include "textflag.h"
+
+#define PSALAA 1208(R0)
+#define GTAB64(x) 80(x)
+#define LCA64(x) 88(x)
+#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
+#define CAA(x) 8(x)
+#define CEECAATHDID(x) 976(x) // in the CAA
+#define EDCHPXV(x) 1016(x) // in the CAA
+#define GOCB(x) 1104(x) // in the CAA
+
+// SS_*, where x=SAVSTACK_ASYNC
+#define SS_LE(x) 0(x)
+#define SS_GO(x) 8(x)
+#define SS_ERRNO(x) 16(x)
+#define SS_ERRNOJR(x) 20(x)
+
+// Function Descriptor Offsets
+#define __errno 0x156*16
+#define __err2ad 0x16C*16
+
+// Call Instructions
+#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6
+#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD
+#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE
+
+DATA zosLibVec<>(SB)/8, $0
+GLOBL zosLibVec<>(SB), NOPTR, $8
+
+TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD CAA(R8), R8
+ MOVD EDCHPXV(R8), R8
+ MOVD R8, zosLibVec<>(SB)
+ RET
+
+TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
+ MOVD zosLibVec<>(SB), R8
+ MOVD R8, ret+0(FP)
+ RET
+
+TEXT ·clearErrno(SB), NOSPLIT, $0-0
+ BL addrerrno<>(SB)
+ MOVD $0, 0(R3)
+ RET
+
+// Returns the address of errno in R3.
+TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get __errno FuncDesc.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ ADD $(__errno), R9
+ LMG 0(R9), R5, R6
+
+ // Switch to saved LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call __errno function.
+ LE_CALL
+ NOPH
+
+ // Switch back to Go stack.
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
+TEXT ·svcCall(SB), NOSPLIT, $0
+ BL runtime·save_g(SB) // Save g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD R15, 0(R9)
+
+ MOVD argv+8(FP), R1 // Move function arguments into registers
+ MOVD dsa+16(FP), g
+ MOVD fnptr+0(FP), R15
+
+ BYTE $0x0D // Branch to function
+ BYTE $0xEF
+
+ BL runtime·load_g(SB) // Restore g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R15
+
+ RET
+
+// func svcLoad(name *byte) unsafe.Pointer
+TEXT ·svcLoad(SB), NOSPLIT, $0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD $0x80000000, R1
+ MOVD $0, R15
+ SVC_LOAD
+ MOVW R15, R3 // Save return code from SVC
+ MOVD R2, R15 // Restore go stack pointer
+ CMP R3, $0 // Check SVC return code
+ BNE error
+
+ MOVD $-2, R3 // Reset last bit of entry point to zero
+ AND R0, R3
+ MOVD R3, ret+8(FP) // Return entry point returned by SVC
+ CMP R0, R3 // Check if last bit of entry point was set
+ BNE done
+
+ MOVD R15, R2 // Save go stack pointer
+ MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
+ SVC_DELETE
+ MOVD R2, R15 // Restore go stack pointer
+
+error:
+ MOVD $0, ret+8(FP) // Return 0 on failure
+
+done:
+ XOR R0, R0 // Reset r0 to 0
+ RET
+
+// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
+TEXT ·svcUnload(SB), NOSPLIT, $0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD fnptr+8(FP), R15
+ SVC_DELETE
+ XOR R0, R0 // Reset r0 to 0
+ MOVD R15, R1 // Save SVC return code
+ MOVD R2, R15 // Restore go stack pointer
+ MOVD R1, ret+16(FP) // Return SVC return code
+ RET
+
+// func gettid() uint64
+TEXT ·gettid(SB), NOSPLIT, $0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get CEECAATHDID
+ MOVD CAA(R8), R9
+ MOVD CEECAATHDID(R9), R9
+ MOVD R9, ret+0(FP)
+
+ RET
+
+//
+// Call LE function; if the return is -1,
+// errno and errno2 are retrieved.
+//
+TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD CAA(R8), R9
+ MOVD g, GOCB(R9)
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
+ MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
+
+ MOVD parms_base+8(FP), R7 // R7 -> argument array
+ MOVD parms_len+16(FP), R8 // R8 number of arguments
+
+ // arg 1 ---> R1
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ MOVD 0(R7), R1
+
+ // arg 2 ---> R2
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R2
+
+ // arg 3 --> R3
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R3
+
+ CMP R8, $0
+ BEQ docall
+ MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
+
+repeat:
+ ADD $8, R7
+ MOVD 0(R7), R0 // advance arg pointer by 8 byte
+ ADD $8, R6 // advance LE argument address by 8 byte
+ MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
+ SUB $1, R8
+ CMP R8, $0
+ BNE repeat
+
+docall:
+ MOVD funcdesc+0(FP), R8 // R8-> function descriptor
+ LMG 0(R8), R5, R6
+ MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
+ LE_CALL // balr R7, R6 (return #1)
+ NOPH
+ MOVD R3, ret+32(FP)
+ CMP R3, $-1 // compare result to -1
+ BNE done
+
+ // retrieve errno and errno2
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__errno), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __errno (return #3)
+ NOPH
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__err2ad), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __err2ad (return #2)
+ NOPH
+ MOVW (R3), R2 // retrieve errno2
+ MOVD R2, errno2+40(FP) // store in return area
+
+done:
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+//
+// Call LE function; if the return is 0,
+// errno and errno2 are retrieved.
+//
+TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD CAA(R8), R9
+ MOVD g, GOCB(R9)
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
+ MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
+
+ MOVD parms_base+8(FP), R7 // R7 -> argument array
+ MOVD parms_len+16(FP), R8 // R8 number of arguments
+
+ // arg 1 ---> R1
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ MOVD 0(R7), R1
+
+ // arg 2 ---> R2
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R2
+
+ // arg 3 --> R3
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R3
+
+ CMP R8, $0
+ BEQ docall
+ MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
+
+repeat:
+ ADD $8, R7
+ MOVD 0(R7), R0 // advance arg pointer by 8 byte
+ ADD $8, R6 // advance LE argument address by 8 byte
+ MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
+ SUB $1, R8
+ CMP R8, $0
+ BNE repeat
+
+docall:
+ MOVD funcdesc+0(FP), R8 // R8-> function descriptor
+ LMG 0(R8), R5, R6
+ MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
+ LE_CALL // balr R7, R6 (return #1)
+ NOPH
+ MOVD R3, ret+32(FP)
+ CMP R3, $0 // compare result to 0
+ BNE done
+
+ // retrieve errno and errno2
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__errno), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __errno (return #3)
+ NOPH
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__err2ad), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __err2ad (return #2)
+ NOPH
+ MOVW (R3), R2 // retrieve errno2
+ MOVD R2, errno2+40(FP) // store in return area
+ XOR R2, R2
+ MOVWZ R2, (R3) // clear errno2
+
+done:
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+//
+// function to test if a pointer can be safely dereferenced (content read)
+// return 0 for success
+//
+TEXT ·ptrtest(SB), NOSPLIT, $0-16
+ MOVD arg+0(FP), R10 // test pointer in R10
+
+ // set up R2 to point to CEECAADMC
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+
+ // set up R5 to point to the "shunt" path which set 1 to R3 (failure)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+
+ // if r3 is not zero (failed) then branch to finish
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+
+	// atomically store the shunt address in R5 into CEECAADMC
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+
+ // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
+ BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
+
+ // finish here, restore 0 into CEECAADMC
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R3, ret+8(FP) // result in R3
+ RET
+
+//
+// function to test if a uintptr can be loaded from a pointer
+// return 1: the 8-byte content
+// 2: 0 for success, 1 for failure
+//
+// func safeload(ptr uintptr) ( value uintptr, error uintptr)
+TEXT ·safeload(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R10 // test pointer in R10
+ MOVD $0x0, R6
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+ BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R6, value+8(FP) // result in R6
+ MOVD R3, error+16(FP) // error in R3
+ RET
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 0000000..37a8252
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
+func Auxv() ([][2]uintptr, error) {
+ vec := runtime_getAuxv()
+ vecLen := len(vec)
+
+ if vecLen == 0 {
+ return nil, syscall.ENOENT
+ }
+
+ if vecLen%2 != 0 {
+ return nil, syscall.EINVAL
+ }
+
+ result := make([]uintptr, vecLen)
+ copy(result, vec)
+ return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
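
`Auxv` above returns the ELF auxiliary vector as `[2]uintptr` key/value pairs. A minimal sketch that looks up the page-size entry; the `AT_PAGESZ` tag value (6) comes from the Linux/ELF ABI and is defined locally here rather than assumed to be exported by the package:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// atPagesz is the auxiliary-vector tag for the system page size (6 in the
// Linux/ELF ABI); defined locally since this sketch does not rely on the
// package exporting it.
const atPagesz = 6

func main() {
	pairs, err := unix.Auxv()
	if err != nil {
		fmt.Println("auxv unavailable:", err)
		return
	}
	for _, kv := range pairs {
		if kv[0] == atPagesz {
			fmt.Println("page size:", kv[1])
		}
	}
}
```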
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 0000000..1200487
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+ return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
new file mode 100644
index 0000000..a178a61
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bluetooth sockets and messages
+
+package unix
+
+// Bluetooth Protocols
+const (
+ BTPROTO_L2CAP = 0
+ BTPROTO_HCI = 1
+ BTPROTO_SCO = 2
+ BTPROTO_RFCOMM = 3
+ BTPROTO_BNEP = 4
+ BTPROTO_CMTP = 5
+ BTPROTO_HIDP = 6
+ BTPROTO_AVDTP = 7
+)
+
+const (
+ HCI_CHANNEL_RAW = 0
+ HCI_CHANNEL_USER = 1
+ HCI_CHANNEL_MONITOR = 2
+ HCI_CHANNEL_CONTROL = 3
+ HCI_CHANNEL_LOGGING = 4
+)
+
+// Socketoption Level
+const (
+ SOL_BLUETOOTH = 0x112
+ SOL_HCI = 0x0
+ SOL_L2CAP = 0x6
+ SOL_RFCOMM = 0x12
+ SOL_SCO = 0x11
+)
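
bluetooth_linux.go above only defines constants; they are meant to be combined with the package's socket helpers. A hedged sketch of opening a raw HCI socket on device hci0 with the user channel — it assumes `unix.SockaddrHCI`, `AF_BLUETOOTH`, and `SOCK_RAW` (all present in x/sys/unix on Linux) and needs root or CAP_NET_ADMIN to succeed:

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Raw Bluetooth HCI socket.
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_RAW|unix.SOCK_CLOEXEC, unix.BTPROTO_HCI)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrHCI{
		Dev:     0,                     // hci0
		Channel: unix.HCI_CHANNEL_USER, // exclusive user-channel access
	}
	if err := unix.Bind(fd, sa); err != nil {
		fmt.Println("bind:", err)
	}
}
```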
diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
new file mode 100644
index 0000000..39d647d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
@@ -0,0 +1,657 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos
+
+package unix
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+)
+
+//go:noescape
+func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
+
+//go:noescape
+func A2e([]byte)
+
+//go:noescape
+func E2a([]byte)
+
+const (
+ BPX4STA = 192 // stat
+ BPX4FST = 104 // fstat
+ BPX4LST = 132 // lstat
+ BPX4OPN = 156 // open
+ BPX4CLO = 72 // close
+ BPX4CHR = 500 // chattr
+ BPX4FCR = 504 // fchattr
+ BPX4LCR = 1180 // lchattr
+ BPX4CTW = 492 // cond_timed_wait
+ BPX4GTH = 1056 // __getthent
+ BPX4PTQ = 412 // pthread_quiesc
+ BPX4PTR = 320 // ptrace
+)
+
+const (
+ //options
+ //byte1
+ BPX_OPNFHIGH = 0x80
+ //byte2
+ BPX_OPNFEXEC = 0x80
+ //byte3
+ BPX_O_NOLARGEFILE = 0x08
+ BPX_O_LARGEFILE = 0x04
+ BPX_O_ASYNCSIG = 0x02
+ BPX_O_SYNC = 0x01
+ //byte4
+ BPX_O_CREXCL = 0xc0
+ BPX_O_CREAT = 0x80
+ BPX_O_EXCL = 0x40
+ BPX_O_NOCTTY = 0x20
+ BPX_O_TRUNC = 0x10
+ BPX_O_APPEND = 0x08
+ BPX_O_NONBLOCK = 0x04
+ BPX_FNDELAY = 0x04
+ BPX_O_RDWR = 0x03
+ BPX_O_RDONLY = 0x02
+ BPX_O_WRONLY = 0x01
+ BPX_O_ACCMODE = 0x03
+ BPX_O_GETFL = 0x0f
+
+ //mode
+ // byte1 (file type)
+ BPX_FT_DIR = 1
+ BPX_FT_CHARSPEC = 2
+ BPX_FT_REGFILE = 3
+ BPX_FT_FIFO = 4
+ BPX_FT_SYMLINK = 5
+ BPX_FT_SOCKET = 6
+ //byte3
+ BPX_S_ISUID = 0x08
+ BPX_S_ISGID = 0x04
+ BPX_S_ISVTX = 0x02
+ BPX_S_IRWXU1 = 0x01
+ BPX_S_IRUSR = 0x01
+ //byte4
+ BPX_S_IRWXU2 = 0xc0
+ BPX_S_IWUSR = 0x80
+ BPX_S_IXUSR = 0x40
+ BPX_S_IRWXG = 0x38
+ BPX_S_IRGRP = 0x20
+ BPX_S_IWGRP = 0x10
+ BPX_S_IXGRP = 0x08
+ BPX_S_IRWXOX = 0x07
+ BPX_S_IROTH = 0x04
+ BPX_S_IWOTH = 0x02
+ BPX_S_IXOTH = 0x01
+
+ CW_INTRPT = 1
+ CW_CONDVAR = 32
+ CW_TIMEOUT = 64
+
+ PGTHA_NEXT = 2
+ PGTHA_CURRENT = 1
+ PGTHA_FIRST = 0
+ PGTHA_LAST = 3
+ PGTHA_PROCESS = 0x80
+ PGTHA_CONTTY = 0x40
+ PGTHA_PATH = 0x20
+ PGTHA_COMMAND = 0x10
+ PGTHA_FILEDATA = 0x08
+ PGTHA_THREAD = 0x04
+ PGTHA_PTAG = 0x02
+ PGTHA_COMMANDLONG = 0x01
+ PGTHA_THREADFAST = 0x80
+ PGTHA_FILEPATH = 0x40
+ PGTHA_THDSIGMASK = 0x20
+ // thread quiece mode
+ QUIESCE_TERM int32 = 1
+ QUIESCE_FORCE int32 = 2
+ QUIESCE_QUERY int32 = 3
+ QUIESCE_FREEZE int32 = 4
+ QUIESCE_UNFREEZE int32 = 5
+ FREEZE_THIS_THREAD int32 = 6
+ FREEZE_EXIT int32 = 8
+ QUIESCE_SRB int32 = 9
+)
+
+type Pgtha struct {
+ Pid uint32 // 0
+ Tid0 uint32 // 4
+ Tid1 uint32
+ Accesspid byte // C
+ Accesstid byte // D
+ Accessasid uint16 // E
+ Loginname [8]byte // 10
+ Flag1 byte // 18
+ Flag1b2 byte // 19
+}
+
+type Bpxystat_t struct { // DSECT BPXYSTAT
+ St_id [4]uint8 // 0
+ St_length uint16 // 0x4
+ St_version uint16 // 0x6
+ St_mode uint32 // 0x8
+ St_ino uint32 // 0xc
+ St_dev uint32 // 0x10
+ St_nlink uint32 // 0x14
+ St_uid uint32 // 0x18
+ St_gid uint32 // 0x1c
+ St_size uint64 // 0x20
+ St_atime uint32 // 0x28
+ St_mtime uint32 // 0x2c
+ St_ctime uint32 // 0x30
+ St_rdev uint32 // 0x34
+ St_auditoraudit uint32 // 0x38
+ St_useraudit uint32 // 0x3c
+ St_blksize uint32 // 0x40
+ St_createtime uint32 // 0x44
+ St_auditid [4]uint32 // 0x48
+ St_res01 uint32 // 0x58
+ Ft_ccsid uint16 // 0x5c
+ Ft_flags uint16 // 0x5e
+ St_res01a [2]uint32 // 0x60
+ St_res02 uint32 // 0x68
+ St_blocks uint32 // 0x6c
+ St_opaque [3]uint8 // 0x70
+ St_visible uint8 // 0x73
+ St_reftime uint32 // 0x74
+ St_fid uint64 // 0x78
+ St_filefmt uint8 // 0x80
+ St_fspflag2 uint8 // 0x81
+ St_res03 [2]uint8 // 0x82
+ St_ctimemsec uint32 // 0x84
+ St_seclabel [8]uint8 // 0x88
+ St_res04 [4]uint8 // 0x90
+ // end of version 1
+ _ uint32 // 0x94
+ St_atime64 uint64 // 0x98
+ St_mtime64 uint64 // 0xa0
+ St_ctime64 uint64 // 0xa8
+ St_createtime64 uint64 // 0xb0
+ St_reftime64 uint64 // 0xb8
+ _ uint64 // 0xc0
+ St_res05 [16]uint8 // 0xc8
+ // end of version 2
+}
+
+type BpxFilestatus struct {
+ Oflag1 byte
+ Oflag2 byte
+ Oflag3 byte
+ Oflag4 byte
+}
+
+type BpxMode struct {
+ Ftype byte
+ Mode1 byte
+ Mode2 byte
+ Mode3 byte
+}
+
+// The attribute structure for extended attributes
+type Bpxyatt_t struct { // DSECT BPXYATT
+ Att_id [4]uint8
+ Att_version uint16
+ Att_res01 [2]uint8
+ Att_setflags1 uint8
+ Att_setflags2 uint8
+ Att_setflags3 uint8
+ Att_setflags4 uint8
+ Att_mode uint32
+ Att_uid uint32
+ Att_gid uint32
+ Att_opaquemask [3]uint8
+ Att_visblmaskres uint8
+ Att_opaque [3]uint8
+ Att_visibleres uint8
+ Att_size_h uint32
+ Att_size_l uint32
+ Att_atime uint32
+ Att_mtime uint32
+ Att_auditoraudit uint32
+ Att_useraudit uint32
+ Att_ctime uint32
+ Att_reftime uint32
+ // end of version 1
+ Att_filefmt uint8
+ Att_res02 [3]uint8
+ Att_filetag uint32
+ Att_res03 [8]uint8
+ // end of version 2
+ Att_atime64 uint64
+ Att_mtime64 uint64
+ Att_ctime64 uint64
+ Att_reftime64 uint64
+ Att_seclabel [8]uint8
+ Att_ver3res02 [8]uint8
+ // end of version 3
+}
+
+func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(options)
+ parms[3] = unsafe.Pointer(mode)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4OPN)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxClose(fd int32) (rv int32, rc int32, rn int32) {
+ var parms [4]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&rv)
+ parms[2] = unsafe.Pointer(&rc)
+ parms[3] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CLO)
+ return rv, rc, rn
+}
+
+func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [6]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&stat_sz)
+ parms[2] = unsafe.Pointer(st)
+ parms[3] = unsafe.Pointer(&rv)
+ parms[4] = unsafe.Pointer(&rc)
+ parms[5] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4FST)
+ return rv, rc, rn
+}
+
+func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&stat_sz)
+ parms[3] = unsafe.Pointer(st)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4STA)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&stat_sz)
+ parms[3] = unsafe.Pointer(st)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4LST)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ if len(path) >= 1024 {
+ return -1, -1, -1
+ }
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], path))
+ A2e(namebuf[:sz])
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&attr_sz)
+ parms[3] = unsafe.Pointer(attr)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CHR)
+ return rv, rc, rn
+}
+
+func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ if len(path) >= 1024 {
+ return -1, -1, -1
+ }
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], path))
+ A2e(namebuf[:sz])
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&attr_sz)
+ parms[3] = unsafe.Pointer(attr)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4LCR)
+ return rv, rc, rn
+}
+
+func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [6]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&attr_sz)
+ parms[2] = unsafe.Pointer(attr)
+ parms[3] = unsafe.Pointer(&rv)
+ parms[4] = unsafe.Pointer(&rc)
+ parms[5] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4FCR)
+ return rv, rc, rn
+}
+
+func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) {
+ var parms [8]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sec)
+ parms[1] = unsafe.Pointer(&nsec)
+ parms[2] = unsafe.Pointer(&events)
+ parms[3] = unsafe.Pointer(secrem)
+ parms[4] = unsafe.Pointer(nsecrem)
+ parms[5] = unsafe.Pointer(&rv)
+ parms[6] = unsafe.Pointer(&rc)
+ parms[7] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CTW)
+ return rv, rc, rn
+}
+func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) {
+ var parms [7]unsafe.Pointer
+	inlen := uint32(26) // must be 26: Go reports Pgtha as 28 bytes because of alignment padding, but the structure is packed and the service expects 26
+ parms[0] = unsafe.Pointer(&inlen)
+ parms[1] = unsafe.Pointer(&in)
+ parms[2] = unsafe.Pointer(outlen)
+ parms[3] = unsafe.Pointer(&out)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4GTH)
+ return rv, rc, rn
+}
+func ZosJobname() (jobname string, err error) {
+ var pgtha Pgtha
+ pgtha.Pid = uint32(Getpid())
+ pgtha.Accesspid = PGTHA_CURRENT
+ pgtha.Flag1 = PGTHA_PROCESS
+ var out [256]byte
+ var outlen uint32
+ outlen = 256
+ rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0]))
+ if rv == 0 {
+ gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic
+ ix := bytes.Index(out[:], gthc)
+ if ix == -1 {
+ err = fmt.Errorf("BPX4GTH: gthc return data not found")
+ return
+ }
+		jn := out[ix+80 : ix+88] // Pgthc is not declared here, but the jobname is an 8-byte field at offset 80
+ E2a(jn)
+ jobname = string(bytes.TrimRight(jn, " "))
+
+ } else {
+		err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason code=0x%x", rv, rc, rn)
+ }
+ return
+}
+func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) {
+ var userdata [8]byte
+ var parms [5]unsafe.Pointer
+ copy(userdata[:], data+" ")
+ A2e(userdata[:])
+ parms[0] = unsafe.Pointer(&code)
+ parms[1] = unsafe.Pointer(&userdata[0])
+ parms[2] = unsafe.Pointer(&rv)
+ parms[3] = unsafe.Pointer(&rc)
+ parms[4] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4PTQ)
+ return rv, rc, rn
+}
+
+const (
+ PT_TRACE_ME = 0 // Debug this process
+ PT_READ_I = 1 // Read a full word
+ PT_READ_D = 2 // Read a full word
+ PT_READ_U = 3 // Read control info
+ PT_WRITE_I = 4 //Write a full word
+ PT_WRITE_D = 5 //Write a full word
+ PT_CONTINUE = 7 //Continue the process
+ PT_KILL = 8 //Terminate the process
+ PT_READ_GPR = 11 // Read GPR, CR, PSW
+ PT_READ_FPR = 12 // Read FPR
+ PT_READ_VR = 13 // Read VR
+ PT_WRITE_GPR = 14 // Write GPR, CR, PSW
+ PT_WRITE_FPR = 15 // Write FPR
+ PT_WRITE_VR = 16 // Write VR
+ PT_READ_BLOCK = 17 // Read storage
+ PT_WRITE_BLOCK = 19 // Write storage
+ PT_READ_GPRH = 20 // Read GPRH
+ PT_WRITE_GPRH = 21 // Write GPRH
+ PT_REGHSET = 22 // Read all GPRHs
+ PT_ATTACH = 30 // Attach to a process
+ PT_DETACH = 31 // Detach from a process
+ PT_REGSET = 32 // Read all GPRs
+ PT_REATTACH = 33 // Reattach to a process
+ PT_LDINFO = 34 // Read loader info
+ PT_MULTI = 35 // Multi process mode
+ PT_LD64INFO = 36 // RMODE64 Info Area
+ PT_BLOCKREQ = 40 // Block request
+ PT_THREAD_INFO = 60 // Read thread info
+ PT_THREAD_MODIFY = 61
+ PT_THREAD_READ_FOCUS = 62
+ PT_THREAD_WRITE_FOCUS = 63
+ PT_THREAD_HOLD = 64
+ PT_THREAD_SIGNAL = 65
+ PT_EXPLAIN = 66
+ PT_EVENTS = 67
+ PT_THREAD_INFO_EXTENDED = 68
+ PT_REATTACH2 = 71
+ PT_CAPTURE = 72
+ PT_UNCAPTURE = 73
+ PT_GET_THREAD_TCB = 74
+ PT_GET_ALET = 75
+ PT_SWAPIN = 76
+ PT_EXTENDED_EVENT = 98
+ PT_RECOVER = 99 // Debug a program check
+ PT_GPR0 = 0 // General purpose register 0
+ PT_GPR1 = 1 // General purpose register 1
+ PT_GPR2 = 2 // General purpose register 2
+ PT_GPR3 = 3 // General purpose register 3
+ PT_GPR4 = 4 // General purpose register 4
+ PT_GPR5 = 5 // General purpose register 5
+ PT_GPR6 = 6 // General purpose register 6
+ PT_GPR7 = 7 // General purpose register 7
+ PT_GPR8 = 8 // General purpose register 8
+ PT_GPR9 = 9 // General purpose register 9
+ PT_GPR10 = 10 // General purpose register 10
+ PT_GPR11 = 11 // General purpose register 11
+ PT_GPR12 = 12 // General purpose register 12
+ PT_GPR13 = 13 // General purpose register 13
+ PT_GPR14 = 14 // General purpose register 14
+ PT_GPR15 = 15 // General purpose register 15
+ PT_FPR0 = 16 // Floating point register 0
+ PT_FPR1 = 17 // Floating point register 1
+ PT_FPR2 = 18 // Floating point register 2
+ PT_FPR3 = 19 // Floating point register 3
+ PT_FPR4 = 20 // Floating point register 4
+ PT_FPR5 = 21 // Floating point register 5
+ PT_FPR6 = 22 // Floating point register 6
+ PT_FPR7 = 23 // Floating point register 7
+ PT_FPR8 = 24 // Floating point register 8
+ PT_FPR9 = 25 // Floating point register 9
+ PT_FPR10 = 26 // Floating point register 10
+ PT_FPR11 = 27 // Floating point register 11
+ PT_FPR12 = 28 // Floating point register 12
+ PT_FPR13 = 29 // Floating point register 13
+ PT_FPR14 = 30 // Floating point register 14
+ PT_FPR15 = 31 // Floating point register 15
+ PT_FPC = 32 // Floating point control register
+ PT_PSW = 40 // PSW
+ PT_PSW0 = 40 // Left half of the PSW
+ PT_PSW1 = 41 // Right half of the PSW
+ PT_CR0 = 42 // Control register 0
+ PT_CR1 = 43 // Control register 1
+ PT_CR2 = 44 // Control register 2
+ PT_CR3 = 45 // Control register 3
+ PT_CR4 = 46 // Control register 4
+ PT_CR5 = 47 // Control register 5
+ PT_CR6 = 48 // Control register 6
+ PT_CR7 = 49 // Control register 7
+ PT_CR8 = 50 // Control register 8
+ PT_CR9 = 51 // Control register 9
+ PT_CR10 = 52 // Control register 10
+ PT_CR11 = 53 // Control register 11
+ PT_CR12 = 54 // Control register 12
+ PT_CR13 = 55 // Control register 13
+ PT_CR14 = 56 // Control register 14
+ PT_CR15 = 57 // Control register 15
+ PT_GPRH0 = 58 // GP High register 0
+ PT_GPRH1 = 59 // GP High register 1
+ PT_GPRH2 = 60 // GP High register 2
+ PT_GPRH3 = 61 // GP High register 3
+ PT_GPRH4 = 62 // GP High register 4
+ PT_GPRH5 = 63 // GP High register 5
+ PT_GPRH6 = 64 // GP High register 6
+ PT_GPRH7 = 65 // GP High register 7
+ PT_GPRH8 = 66 // GP High register 8
+ PT_GPRH9 = 67 // GP High register 9
+ PT_GPRH10 = 68 // GP High register 10
+ PT_GPRH11 = 69 // GP High register 11
+ PT_GPRH12 = 70 // GP High register 12
+ PT_GPRH13 = 71 // GP High register 13
+ PT_GPRH14 = 72 // GP High register 14
+ PT_GPRH15 = 73 // GP High register 15
+ PT_VR0 = 74 // Vector register 0
+ PT_VR1 = 75 // Vector register 1
+ PT_VR2 = 76 // Vector register 2
+ PT_VR3 = 77 // Vector register 3
+ PT_VR4 = 78 // Vector register 4
+ PT_VR5 = 79 // Vector register 5
+ PT_VR6 = 80 // Vector register 6
+ PT_VR7 = 81 // Vector register 7
+ PT_VR8 = 82 // Vector register 8
+ PT_VR9 = 83 // Vector register 9
+ PT_VR10 = 84 // Vector register 10
+ PT_VR11 = 85 // Vector register 11
+ PT_VR12 = 86 // Vector register 12
+ PT_VR13 = 87 // Vector register 13
+ PT_VR14 = 88 // Vector register 14
+ PT_VR15 = 89 // Vector register 15
+ PT_VR16 = 90 // Vector register 16
+ PT_VR17 = 91 // Vector register 17
+ PT_VR18 = 92 // Vector register 18
+ PT_VR19 = 93 // Vector register 19
+ PT_VR20 = 94 // Vector register 20
+ PT_VR21 = 95 // Vector register 21
+ PT_VR22 = 96 // Vector register 22
+ PT_VR23 = 97 // Vector register 23
+ PT_VR24 = 98 // Vector register 24
+ PT_VR25 = 99 // Vector register 25
+ PT_VR26 = 100 // Vector register 26
+ PT_VR27 = 101 // Vector register 27
+ PT_VR28 = 102 // Vector register 28
+ PT_VR29 = 103 // Vector register 29
+ PT_VR30 = 104 // Vector register 30
+ PT_VR31 = 105 // Vector register 31
+ PT_PSWG = 106 // PSWG
+ PT_PSWG0 = 106 // Bytes 0-3
+ PT_PSWG1 = 107 // Bytes 4-7
+ PT_PSWG2 = 108 // Bytes 8-11 (IA high word)
+ PT_PSWG3 = 109 // Bytes 12-15 (IA low word)
+)
+
+func Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) {
+ var parms [8]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&request)
+ parms[1] = unsafe.Pointer(&pid)
+ parms[2] = unsafe.Pointer(&addr)
+ parms[3] = unsafe.Pointer(&data)
+ parms[4] = unsafe.Pointer(&buffer)
+ parms[5] = unsafe.Pointer(&rv)
+ parms[6] = unsafe.Pointer(&rc)
+ parms[7] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4PTR)
+ return rv, rc, rn
+}
+
+func copyU8(val uint8, dest []uint8) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU8Arr(src, dest []uint8) int {
+ if len(dest) < len(src) {
+ return 0
+ }
+ for i, v := range src {
+ dest[i] = v
+ }
+ return len(src)
+}
+
+func copyU16(val uint16, dest []uint16) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU32(val uint32, dest []uint32) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU32Arr(src, dest []uint32) int {
+ if len(dest) < len(src) {
+ return 0
+ }
+ for i, v := range src {
+ dest[i] = v
+ }
+ return len(src)
+}
+
+func copyU64(val uint64, dest []uint64) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
new file mode 100644
index 0000000..4bd4a17
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
@@ -0,0 +1,192 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// function to call USS assembly language services
+//
+// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm
+//
+// arg1 unsafe.Pointer array that resembles an OS PLIST
+//
+// arg2 function offset as in
+// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm
+//
+// func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
+
+TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0
+ MOVD plist_base+0(FP), R1 // r1 points to plist
+ MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table
+ MOVD R14, R7 // save r14
+ MOVD R15, R8 // save r15
+ MOVWZ 16(R0), R9
+ MOVWZ 544(R9), R9
+ MOVWZ 24(R9), R9 // call vector in r9
+ ADD R2, R9 // add offset to vector table
+ MOVWZ (R9), R9 // r9 points to entry point
+ BYTE $0x0D // BL R14,R9 --> basr r14,r9
+ BYTE $0xE9 // clobbers 0,1,14,15
+ MOVD R8, R15 // restore 15
+ JMP R7 // return via saved return address
+
+// func A2e(arr [] byte)
+// code page conversion from 819 to 1047
+TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0
+	MOVD arg_base+0(FP), R2 // pointer to array of characters
+ MOVD arg_len+8(FP), R3 // count
+ XOR R0, R0
+ XOR R1, R1
+ BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
+
+ // ASCII -> EBCDIC conversion table:
+ BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
+ BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f
+ BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b
+ BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
+ BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
+ BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26
+ BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27
+ BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
+ BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b
+ BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d
+ BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e
+ BYTE $0x6b; BYTE $0x60; BYTE $0x4b; BYTE $0x61
+ BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3
+ BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7
+ BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e
+ BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f
+ BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3
+ BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7
+ BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2
+ BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6
+ BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2
+ BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6
+ BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad
+ BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d
+ BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83
+ BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87
+ BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92
+ BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96
+ BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2
+ BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6
+ BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0
+ BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07
+ BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23
+ BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17
+ BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b
+ BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b
+ BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33
+ BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08
+ BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b
+ BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff
+ BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1
+ BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5
+ BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a
+ BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc
+ BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa
+ BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3
+ BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b
+ BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab
+ BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66
+ BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68
+ BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73
+ BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77
+ BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee
+ BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf
+ BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb
+ BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59
+ BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46
+ BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48
+ BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53
+ BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57
+ BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce
+ BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1
+ BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb
+ BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf
+
+retry:
+ WORD $0xB9931022 // TROO 2,2,b'0001'
+ BVS retry
+ RET
+
+// func e2a(arr [] byte)
+// code page conversion from 1047 to 819
+TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0
+	MOVD arg_base+0(FP), R2 // pointer to array of characters
+ MOVD arg_len+8(FP), R3 // count
+ XOR R0, R0
+ XOR R1, R1
+ BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
+
+ // EBCDIC -> ASCII conversion table:
+ BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
+ BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f
+ BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b
+ BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
+ BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
+ BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87
+ BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f
+ BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
+ BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83
+ BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b
+ BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b
+ BYTE $0x8c; BYTE $0x05; BYTE $0x06; BYTE $0x07
+ BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93
+ BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04
+ BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b
+ BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a
+ BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4
+ BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5
+ BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e
+ BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c
+ BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb
+ BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef
+ BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24
+ BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e
+ BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4
+ BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5
+ BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c
+ BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f
+ BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb
+ BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf
+ BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23
+ BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22
+ BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63
+ BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67
+ BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb
+ BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1
+ BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c
+ BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70
+ BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba
+ BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4
+ BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74
+ BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78
+ BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf
+ BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae
+ BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7
+ BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc
+ BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8
+ BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7
+ BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43
+ BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47
+ BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4
+ BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5
+ BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c
+ BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50
+ BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb
+ BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff
+ BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54
+ BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58
+ BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4
+ BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5
+ BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33
+ BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37
+ BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb
+ BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f
+
+retry:
+ WORD $0xB9931022 // TROO 2,2,b'0001'
+ BVS retry
+ RET
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
new file mode 100644
index 0000000..a086578
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd
+
+package unix
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
+
+const (
+ // This is the version of CapRights this package understands. See C implementation for parallels.
+ capRightsGoVersion = CAP_RIGHTS_VERSION_00
+ capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
+ capArSizeMax = capRightsGoVersion + 2
+)
+
+var (
+ bit2idx = []int{
+ -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
+ 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ }
+)
+
+func capidxbit(right uint64) int {
+ return int((right >> 57) & 0x1f)
+}
+
+func rightToIndex(right uint64) (int, error) {
+ idx := capidxbit(right)
+ if idx < 0 || idx >= len(bit2idx) {
+ return -2, fmt.Errorf("index for right 0x%x out of range", right)
+ }
+ return bit2idx[idx], nil
+}
+
+func caprver(right uint64) int {
+ return int(right >> 62)
+}
+
+func capver(rights *CapRights) int {
+ return caprver(rights.Rights[0])
+}
+
+func caparsize(rights *CapRights) int {
+ return capver(rights) + 2
+}
+
+// CapRightsSet sets the permissions in setrights in rights.
+func CapRightsSet(rights *CapRights, setrights []uint64) error {
+ // This is essentially a copy of cap_rights_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] |= right
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsClear clears the permissions in clearrights from rights.
+func CapRightsClear(rights *CapRights, clearrights []uint64) error {
+ // This is essentially a copy of cap_rights_vclear()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range clearrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
+func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
+ // This is essentially a copy of cap_rights_is_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return false, fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return false, errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return false, errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return false, err
+ }
+ if i >= n {
+ return false, errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return false, errors.New("index mismatch")
+ }
+ if (rights.Rights[i] & right) != right {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func capright(idx uint64, bit uint64) uint64 {
+ return ((1 << (57 + idx)) | bit)
+}
+
+// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
+// See man cap_rights_init(3) and rights(4).
+func CapRightsInit(rights []uint64) (*CapRights, error) {
+ var r CapRights
+ r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
+ r.Rights[1] = capright(1, 0)
+
+ err := CapRightsSet(&r, rights)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
+// The capability rights on fd can never be increased by CapRightsLimit.
+// See man cap_rights_limit(2) and rights(4).
+func CapRightsLimit(fd uintptr, rights *CapRights) error {
+ return capRightsLimit(int(fd), rights)
+}
+
+// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
+// See man cap_rights_get(3) and rights(4).
+func CapRightsGet(fd uintptr) (*CapRights, error) {
+ r, err := CapRightsInit(nil)
+ if err != nil {
+ return nil, err
+ }
+ err = capRightsGet(capRightsGoVersion, int(fd), r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
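+
+// A minimal, illustrative sketch: a common Capsicum pattern is to build a
+// rights set with CapRightsInit and then shrink a descriptor with
+// CapRightsLimit. CAP_READ, CAP_SEEK and CAP_FCNTL are assumed to be
+// defined in the generated FreeBSD constants of this package, and f is an
+// assumed already-open *os.File.
+//
+//	rights, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_SEEK, unix.CAP_FCNTL})
+//	if err != nil {
+//		return err
+//	}
+//	if err := unix.CapRightsLimit(f.Fd(), rights); err != nil {
+//		return err
+//	}
+//	// From here on the descriptor can only read and seek; its rights can
+//	// never be widened again.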
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 0000000..6fb7cb7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
new file mode 100644
index 0000000..d785134
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0xffff)
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return uint64(((major) << 16) | (minor))
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
new file mode 100644
index 0000000..623a5e6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc64
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x3fffffff00000000) >> 32)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32((dev & 0x00000000ffffffff) >> 0)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ var DEVNO64 uint64
+ DEVNO64 = 0x8000000000000000
+ return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 0000000..8d1dc0f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 0000000..8502f20
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 0000000..eba3b4b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
new file mode 100644
index 0000000..d165d6f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by the Linux kernel and glibc.
+//
+// The information below is extracted and adapted from bits/sysmacros.h in the
+// glibc sources:
+//
+// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
+// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
+// number and m is a hex digit of the minor number. This is backward compatible
+// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
+// backward compatible with the Linux kernel, which for some architectures uses
+// 32-bit dev_t, encoded as mmmM MMmm.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ return major
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) & 0x00000fff) << 8
+ dev |= (uint64(major) & 0xfffff000) << 32
+ dev |= (uint64(minor) & 0x000000ff) << 0
+ dev |= (uint64(minor) & 0xffffff00) << 12
+ return dev
+}
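+
+// A small worked example of the glibc encoding described above: major 8
+// and minor 1 (the conventional numbers for /dev/sda1, used here purely
+// for illustration) pack into 0x801, and Major and Minor recover the
+// components again.
+//
+//	dev := unix.Mkdev(8, 1) // 0x0000000000000801
+//	_ = unix.Major(dev)     // 8
+//	_ = unix.Minor(dev)     // 1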
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 0000000..b4a203d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x000fff00
+ dev |= (uint64(minor) << 12) & 0xfff00000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 0000000..f3430c4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x0000ff00
+ dev |= (uint64(minor) << 8) & 0xffff0000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go
new file mode 100644
index 0000000..bb6a64f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -0,0 +1,28 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by z/OS.
+//
+// The information below is extracted and adapted from macros.
+
+package unix
+
+// Major returns the major component of a z/OS device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0x0000FFFF)
+}
+
+// Minor returns the minor component of a z/OS device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0x0000FFFF)
+}
+
+// Mkdev returns a z/OS device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 16) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
new file mode 100644
index 0000000..1ebf117
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+package unix
+
+import "unsafe"
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
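+
+// A minimal, illustrative sketch: on Linux, ParseDirent is typically fed
+// from Getdents, which is assumed to be provided elsewhere in this package
+// for that platform; dirfd is an assumed open directory descriptor.
+//
+//	buf := make([]byte, 4096)
+//	n, err := unix.Getdents(dirfd, buf)
+//	if err != nil {
+//		return err
+//	}
+//	var names []string
+//	_, _, names = unix.ParseDirent(buf[:n], -1, names)
+//	// names now holds the directory entries, minus "." and "..".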
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
new file mode 100644
index 0000000..1095fd3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_big.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
+
+package unix
+
+const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
new file mode 100644
index 0000000..b9f0e27
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
+
+package unix
+
+const isBigEndian = false
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 0000000..a96da71
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,31 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go
new file mode 100644
index 0000000..6200876
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+func fcntl(fd int, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(valptr), err
+}
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
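+
+// A minimal, illustrative sketch: taking an exclusive advisory lock on a
+// whole file with FcntlFlock. F_WRLCK, F_SETLKW and the exact Flock_t
+// layout are assumed to come from the generated constants and types for
+// the target platform, and f is an assumed already-open *os.File.
+//
+//	lk := unix.Flock_t{
+//		Type:   unix.F_WRLCK,
+//		Whence: int16(unix.SEEK_SET),
+//		Start:  0,
+//		Len:    0, // 0 means "lock to the end of the file"
+//	}
+//	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
+//		return err
+//	}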
diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
new file mode 100644
index 0000000..a9911c7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
+ return err
+}
+
+// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
+func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
+ return err
+}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
new file mode 100644
index 0000000..13b4acd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
new file mode 100644
index 0000000..9e83d18
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+package unix
+
+// Set adds fd to the set fds.
+func (fds *FdSet) Set(fd int) {
+ fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// Clear removes fd from the set fds.
+func (fds *FdSet) Clear(fd int) {
+ fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// IsSet returns whether fd is in the set fds.
+func (fds *FdSet) IsSet(fd int) bool {
+ return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
+}
+
+// Zero clears the set fds.
+func (fds *FdSet) Zero() {
+ for i := range fds.Bits {
+ fds.Bits[i] = 0
+ }
+}
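+
+// A minimal, illustrative sketch: FdSet is normally used together with
+// Select, which is assumed to be available for the target platform; fd is
+// an assumed open descriptor.
+//
+//	var rfds unix.FdSet
+//	rfds.Zero()
+//	rfds.Set(fd)
+//	tv := unix.Timeval{Sec: 1}
+//	n, err := unix.Select(fd+1, &rfds, nil, nil, &tv)
+//	if err == nil && n > 0 && rfds.IsSet(fd) {
+//		// fd is readable without blocking
+//	}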
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 0000000..aca5721
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && !aix && !hurd
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 0000000..d468b7b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,44 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && !aix && !hurd
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
+
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 0000000..972d61b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && linux && amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
new file mode 100644
index 0000000..848840a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// Helpers for dealing with ifreq since it contains a union and thus requires a
+// lot of unsafe.Pointer casts to use properly.
+
+// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq
+// contains an interface name and a union of arbitrary data which can be
+// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq
+// function.
+//
+// Use the Name method to access the stored interface name. The union data
+// fields can be get and set using the following methods:
+// - Uint16/SetUint16: flags
+// - Uint32/SetUint32: ifindex, metric, mtu
+type Ifreq struct{ raw ifreq }
+
+// NewIfreq creates an Ifreq with the input network interface name after
+// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required)
+// bytes.
+func NewIfreq(name string) (*Ifreq, error) {
+ // Leave room for terminating NULL byte.
+ if len(name) >= IFNAMSIZ {
+ return nil, EINVAL
+ }
+
+ var ifr ifreq
+ copy(ifr.Ifrn[:], name)
+
+ return &Ifreq{raw: ifr}, nil
+}
+
+// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc.
+
+// Name returns the interface name associated with the Ifreq.
+func (ifr *Ifreq) Name() string {
+ return ByteSliceToString(ifr.raw.Ifrn[:])
+}
+
+// According to netdevice(7), only AF_INET addresses are returned for numerous
+// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port
+// field and other data is always empty.
+
+// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C
+// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not
+// AF_INET, an error is returned.
+func (ifr *Ifreq) Inet4Addr() ([]byte, error) {
+ raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]))
+ if raw.Family != AF_INET {
+ // Cannot safely interpret raw.Addr bytes as an IPv4 address.
+ return nil, EINVAL
+ }
+
+ return raw.Addr[:], nil
+}
+
+// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an
+// embedded sockaddr within the Ifreq's union data. v must be 4 bytes in length
+// or an error will be returned.
+func (ifr *Ifreq) SetInet4Addr(v []byte) error {
+ if len(v) != 4 {
+ return EINVAL
+ }
+
+ var addr [4]byte
+ copy(addr[:], v)
+
+ ifr.clear()
+ *(*RawSockaddrInet4)(
+ unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]),
+ ) = RawSockaddrInet4{
+ // Always set IP family as ioctls would require it anyway.
+ Family: AF_INET,
+ Addr: addr,
+ }
+
+ return nil
+}
+
+// Uint16 returns the Ifreq union data as a C short/Go uint16 value.
+func (ifr *Ifreq) Uint16() uint16 {
+ return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0]))
+}
+
+// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data.
+func (ifr *Ifreq) SetUint16(v uint16) {
+ ifr.clear()
+ *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v
+}
+
+// Uint32 returns the Ifreq union data as a C int/Go uint32 value.
+func (ifr *Ifreq) Uint32() uint32 {
+ return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0]))
+}
+
+// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data.
+func (ifr *Ifreq) SetUint32(v uint32) {
+ ifr.clear()
+ *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v
+}
+
+// clear zeroes the ifreq's union field to prevent trailing garbage data from
+// being sent to the kernel if an ifreq is reused.
+func (ifr *Ifreq) clear() {
+ for i := range ifr.raw.Ifru {
+ ifr.raw.Ifru[i] = 0
+ }
+}
+
+// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
+// IoctlGetEthtoolDrvinfo which use these APIs under the hood.
+
+// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData,
+// use the Ifreq.withData method.
+type ifreqData struct {
+ name [IFNAMSIZ]byte
+ // A type separate from ifreq is required in order to comply with the
+ // unsafe.Pointer rules since the "pointer-ness" of data would not be
+ // preserved if it were cast into the byte array of a raw ifreq.
+ data unsafe.Pointer
+ // Pad to the same size as ifreq.
+ _ [len(ifreq{}.Ifru) - SizeofPtr]byte
+}
+
+// withData produces an ifreqData with the pointer p set for ioctls which require
+// arbitrary pointer data.
+func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData {
+ return ifreqData{
+ name: ifr.raw.Ifrn,
+ data: p,
+ }
+}
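+
+// A minimal, illustrative sketch: reading an interface MTU through the
+// Ifreq helpers, assuming IoctlIfreq and SIOCGIFMTU are provided elsewhere
+// in this vendored copy of the package; fd is an assumed open socket and
+// "eth0" is a placeholder interface name.
+//
+//	ifr, err := unix.NewIfreq("eth0")
+//	if err != nil {
+//		return err
+//	}
+//	if err := unix.IoctlIfreq(fd, unix.SIOCGIFMTU, ifr); err != nil {
+//		return err
+//	}
+//	_ = ifr.Uint32() // union data interpreted as the interface MTU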
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
new file mode 100644
index 0000000..7ca4fa1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -0,0 +1,334 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// IoctlRetInt performs an ioctl operation specified by req on a device
+// associated with opened file descriptor fd, and returns a non-negative
+// integer that is returned by the ioctl syscall.
+func IoctlRetInt(fd int, req uint) (int, error) {
+ ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(ret), nil
+}
+
+func IoctlGetUint32(fd int, req uint) (uint32, error) {
+ var value uint32
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetRTCTime(fd int) (*RTCTime, error) {
+ var value RTCTime
+ err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlSetRTCTime(fd int, value *RTCTime) error {
+ return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value))
+}
+
+func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
+ var value RTCWkAlrm
+ err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
+ return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value))
+}
+
+// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
+// device specified by ifname.
+func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+ return &value, err
+}
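+
+// A minimal, illustrative sketch: the ethtool ioctls operate on any socket
+// descriptor, so a throwaway AF_INET datagram socket is commonly used.
+// Socket and Close are assumed to be available elsewhere in this package,
+// and "eth0" is a placeholder interface name.
+//
+//	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
+//	if err != nil {
+//		return err
+//	}
+//	defer unix.Close(fd)
+//	info, err := unix.IoctlGetEthtoolDrvinfo(fd, "eth0")
+//	if err != nil {
+//		return err
+//	}
+//	_ = info.Driver // NUL-padded driver name reported by the kernel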
+
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+ return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := HwTstampConfig{}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+ return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return err
+ }
+ ifrd := ifr.withData(unsafe.Pointer(cfg))
+ return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// (see the FD_TO_CLOCKID macro in clock_gettime(3)). The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+ var value PtpClockCaps
+ err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+ var value PtpSysOffsetPrecise
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+ value := PtpSysOffsetExtended{Samples: uint32(samples)}
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on the given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+ value := PtpPinDesc{Index: uint32(index)}
+ err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinSetfunc updates the configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+ return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+ return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+ return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
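
A sketch tying the PTP helpers together with FdToClockID: open a PTP character device, read its capabilities, then sample the clock through clock_gettime using the dynamic clock ID. The /dev/ptp0 path is an assumption, and the PtpClockCaps field names follow the kernel's struct ptp_clock_caps:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
    if err != nil {
        log.Fatalf("open ptp: %v", err)
    }
    defer unix.Close(fd)

    caps, err := unix.IoctlPtpClockGetcaps(fd)
    if err != nil {
        log.Fatalf("PTP_CLOCK_GETCAPS2: %v", err)
    }
    fmt.Printf("max adjustment: %d ppb, pins: %d\n", caps.Max_adj, caps.N_pins)

    // Read the PHC itself via clock_gettime, using the derived dynamic clock ID.
    var ts unix.Timespec
    if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
        log.Fatalf("clock_gettime: %v", err)
    }
    fmt.Printf("phc time: %d.%09d\n", ts.Sec, ts.Nsec)
}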
+
+// IoctlGetWatchdogInfo fetches information about a watchdog device from the
+// Linux watchdog API. For more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
+ var value WatchdogInfo
+ err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
+// more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlWatchdogKeepalive(fd int) error {
+ // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr.
+ return ioctl(fd, WDIOC_KEEPALIVE, 0)
+}
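
A sketch of the watchdog pattern these two functions support: query the driver, then issue periodic keepalives. The conventional /dev/watchdog node is assumed, and note that opening it typically arms the timer:

package main

import (
    "fmt"
    "log"
    "time"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.Open("/dev/watchdog", unix.O_WRONLY, 0)
    if err != nil {
        log.Fatalf("open watchdog: %v", err)
    }
    defer unix.Close(fd)

    info, err := unix.IoctlGetWatchdogInfo(fd)
    if err != nil {
        log.Fatalf("WDIOC_GETSUPPORT: %v", err)
    }
    fmt.Printf("watchdog: %s\n", unix.ByteSliceToString(info.Identity[:]))

    // Pet the dog a few times; the timer keeps running unless the driver
    // honors the magic-close protocol on exit.
    for i := 0; i < 3; i++ {
        if err := unix.IoctlWatchdogKeepalive(fd); err != nil {
            log.Fatalf("WDIOC_KEEPALIVE: %v", err)
        }
        time.Sleep(time.Second)
    }
}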
+
+// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
+// range of data conveyed in value to the file associated with the file
+// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
+func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
+ return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value))
+}
+
+// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
+// associated with the file descriptor srcFd to the file associated with the
+// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
+func IoctlFileClone(destFd, srcFd int) error {
+ return ioctl(destFd, FICLONE, uintptr(srcFd))
+}
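
A sketch of a whole-file reflink using IoctlFileClone; the file names are placeholders, and both files must live on the same reflink-capable filesystem (for example Btrfs or XFS):

package main

import (
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    src, err := unix.Open("data.bin", unix.O_RDONLY, 0)
    if err != nil {
        log.Fatalf("open src: %v", err)
    }
    defer unix.Close(src)

    dst, err := unix.Open("clone.bin", unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC, 0o644)
    if err != nil {
        log.Fatalf("open dst: %v", err)
    }
    defer unix.Close(dst)

    // Share all extents of src with dst instead of copying the bytes.
    if err := unix.IoctlFileClone(dst, src); err != nil {
        log.Fatalf("FICLONE: %v", err)
    }
}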
+
+type FileDedupeRange struct {
+ Src_offset uint64
+ Src_length uint64
+ Reserved1 uint16
+ Reserved2 uint32
+ Info []FileDedupeRangeInfo
+}
+
+type FileDedupeRangeInfo struct {
+ Dest_fd int64
+ Dest_offset uint64
+ Bytes_deduped uint64
+ Status int32
+ Reserved uint32
+}
+
+// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
+// range of data conveyed in value from the file associated with the file
+// descriptor srcFd to the value.Info destinations. See the
+// ioctl_fideduperange(2) man page for details.
+func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
+ buf := make([]byte, SizeofRawFileDedupeRange+
+ len(value.Info)*SizeofRawFileDedupeRangeInfo)
+ rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
+ rawrange.Src_offset = value.Src_offset
+ rawrange.Src_length = value.Src_length
+ rawrange.Dest_count = uint16(len(value.Info))
+ rawrange.Reserved1 = value.Reserved1
+ rawrange.Reserved2 = value.Reserved2
+
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ rawinfo.Dest_fd = value.Info[i].Dest_fd
+ rawinfo.Dest_offset = value.Info[i].Dest_offset
+ rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
+ rawinfo.Status = value.Info[i].Status
+ rawinfo.Reserved = value.Info[i].Reserved
+ }
+
+ err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0]))
+
+ // Output
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ value.Info[i].Dest_fd = rawinfo.Dest_fd
+ value.Info[i].Dest_offset = rawinfo.Dest_offset
+ value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
+ value.Info[i].Status = rawinfo.Status
+ value.Info[i].Reserved = rawinfo.Reserved
+ }
+
+ return err
+}
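
A sketch of deduplicating one range between two files with IoctlFileDedupeRange; the file names and the 1 MiB length are placeholders, and the kernel writes per-destination results back into value.Info as shown above:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    src, err := unix.Open("a.bin", unix.O_RDONLY, 0)
    if err != nil {
        log.Fatalf("open a.bin: %v", err)
    }
    defer unix.Close(src)

    dst, err := unix.Open("b.bin", unix.O_RDWR, 0)
    if err != nil {
        log.Fatalf("open b.bin: %v", err)
    }
    defer unix.Close(dst)

    dedupe := unix.FileDedupeRange{
        Src_offset: 0,
        Src_length: 1 << 20, // dedupe the first 1 MiB
        Info: []unix.FileDedupeRangeInfo{
            {Dest_fd: int64(dst), Dest_offset: 0},
        },
    }
    if err := unix.IoctlFileDedupeRange(src, &dedupe); err != nil {
        log.Fatalf("FIDEDUPERANGE: %v", err)
    }
    for _, info := range dedupe.Info {
        // Status is FILE_DEDUPE_RANGE_SAME (0) when the ranges were identical.
        fmt.Printf("fd %d: deduped %d bytes, status %d\n",
            info.Dest_fd, info.Bytes_deduped, info.Status)
    }
}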
+
+func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
+ return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value))
+}
+
+func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
+ var value HIDRawDevInfo
+ err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlHIDGetRawName(fd int) (string, error) {
+ var value [_HIDIOCGRAWNAME_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawPhys(fd int) (string, error) {
+ var value [_HIDIOCGRAWPHYS_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawUniq(fd int) (string, error) {
+ var value [_HIDIOCGRAWUNIQ_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
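
A sketch reading identity information from a raw HID device; the /dev/hidraw0 path is an assumption, and HIDRawDevInfo mirrors the kernel's struct hidraw_devinfo:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.Open("/dev/hidraw0", unix.O_RDONLY, 0)
    if err != nil {
        log.Fatalf("open hidraw: %v", err)
    }
    defer unix.Close(fd)

    name, err := unix.IoctlHIDGetRawName(fd)
    if err != nil {
        log.Fatalf("HIDIOCGRAWNAME: %v", err)
    }
    info, err := unix.IoctlHIDGetRawInfo(fd)
    if err != nil {
        log.Fatalf("HIDIOCGRAWINFO: %v", err)
    }
    fmt.Printf("%s (bus %d, vendor %#04x, product %#04x)\n",
        name, info.Bustype, uint16(info.Vendor), uint16(info.Product))
}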
+
+// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or
+// output. See the netdevice(7) man page for details.
+func IoctlIfreq(fd int, req uint, value *Ifreq) error {
+ // It is possible we will add more fields to *Ifreq itself later to prevent
+ // misuse, so pass the raw *ifreq directly.
+ return ioctlPtr(fd, req, unsafe.Pointer(&value.raw))
+}
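
A sketch of a classic netdevice(7) request through IoctlIfreq, reading an interface MTU with SIOCGIFMTU; the interface name is an assumption, and Name/Uint32 are the Ifreq accessors defined earlier in ifreq.go:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
    if err != nil {
        log.Fatalf("socket: %v", err)
    }
    defer unix.Close(fd)

    ifr, err := unix.NewIfreq("eth0") // assumed interface name
    if err != nil {
        log.Fatalf("NewIfreq: %v", err)
    }
    if err := unix.IoctlIfreq(fd, unix.SIOCGIFMTU, ifr); err != nil {
        log.Fatalf("SIOCGIFMTU: %v", err)
    }
    // The kernel returns the MTU in the union's integer field.
    fmt.Printf("%s MTU: %d\n", ifr.Name(), ifr.Uint32())
}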
+
+// TODO(mdlayher): export if and when IfreqData is exported.
+
+// ioctlIfreqData performs an ioctl using an ifreqData structure for input
+// and/or output. See the netdevice(7) man page for details.
+func ioctlIfreqData(fd int, req uint, value *ifreqData) error {
+ // The memory layouts of ifreqData (type-safe) and ifreq (not type-safe) are
+ // identical, so pass *ifreqData directly.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an
+// existing KCM socket, returning a structure containing the file descriptor of
+// the new socket.
+func IoctlKCMClone(fd int) (*KCMClone, error) {
+ var info KCMClone
+ if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil {
+ return nil, err
+ }
+
+ return &info, nil
+}
+
+// IoctlKCMAttach attaches a TCP socket and associated BPF program file
+// descriptor to a multiplexor.
+func IoctlKCMAttach(fd int, info KCMAttach) error {
+ return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info))
+}
+
+// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor.
+func IoctlKCMUnattach(fd int, info KCMUnattach) error {
+ return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
+}
+
+// IoctlLoopGetStatus64 gets the status of the loop device associated with the
+// file descriptor fd using the LOOP_GET_STATUS64 operation.
+func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
+ var value LoopInfo64
+ if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
+ return nil, err
+ }
+ return &value, nil
+}
+
+// IoctlLoopSetStatus64 sets the status of the loop device associated with the
+// file descriptor fd using the LOOP_SET_STATUS64 operation.
+func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
+ return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
+}
+
+// IoctlLoopConfigure configures all loop device parameters in a single step.
+func IoctlLoopConfigure(fd int, value *LoopConfig) error {
+ return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
+}
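
A sketch querying an already-configured loop device with IoctlLoopGetStatus64; the /dev/loop0 path is an assumption:

package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    fd, err := unix.Open("/dev/loop0", unix.O_RDONLY, 0)
    if err != nil {
        log.Fatalf("open loop: %v", err)
    }
    defer unix.Close(fd)

    st, err := unix.IoctlLoopGetStatus64(fd)
    if err != nil {
        log.Fatalf("LOOP_GET_STATUS64: %v", err)
    }
    fmt.Printf("backing file: %s (offset %d)\n",
        unix.ByteSliceToString(st.File_name[:]), st.Offset)
}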
diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go
new file mode 100644
index 0000000..5b0759b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || solaris
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req int, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req int, value int) error {
+ v := int32(value)
+ return ioctlPtr(fd, req, unsafe.Pointer(&v))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req int, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req int, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
new file mode 100644
index 0000000..20f470b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req uint, value int) error {
+ v := int32(value)
+ return ioctlPtr(fd, req, unsafe.Pointer(&v))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
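
The Winsize and Termios getters above are the usual way to inspect a terminal. A Linux-flavoured sketch follows; TCGETS and TIOCGWINSZ are the Linux request constants, and other platforms use TIOCGETA and friends instead:

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    fd := int(os.Stdin.Fd())

    ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
    if err != nil {
        log.Fatalf("TIOCGWINSZ: %v", err)
    }
    fmt.Printf("terminal: %d rows x %d cols\n", ws.Row, ws.Col)

    tio, err := unix.IoctlGetTermios(fd, unix.TCGETS)
    if err != nil {
        log.Fatalf("TCGETS: %v", err)
    }
    // Check whether input echo is currently enabled.
    fmt.Printf("echo enabled: %v\n", tio.Lflag&unix.ECHO != 0)
}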
diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go
new file mode 100644
index 0000000..c8b2a75
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req int, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req int, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCSETS, TCSETSW, or TCSETSF.
+func IoctlSetTermios(fd int, req int, value *Termios) error {
+ if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
+ return ENOSYS
+ }
+ err := Tcsetattr(fd, int(req), value)
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlGetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCGETS.
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ if req != TCGETS {
+ return &value, ENOSYS
+ }
+ err := Tcgetattr(fd, &value)
+ return &value, err
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100644
index 0000000..e6f31d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,249 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+mkasm=
+run="sh"
+cmd=""
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ cmd="echo"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
+ # Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
+ exit
+fi
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm.go"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm.go"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_riscv64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_arm64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_mips64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_ppc64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_riscv64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+solaris_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+illumos_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors=
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdin.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "illumos" ]; then
+ # illumos code generation requires a --illumos switch
+ echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
+ # illumos implies solaris, so solaris code generation is also required
+ echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100644
index 0000000..6ab02b6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,805 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+fi
+
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_AIX='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define AF_LOCAL AF_UNIX
+'
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL 1
+#define _DARWIN_USE_64_BIT_INODE
+#define __APPLE_USE_RFC_3542
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk.
+#define TIOCREMOTE 0x80047469
+'
+
+includes_DragonFly='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_FreeBSD='
+#include
+#include
+#include
+#include